Commit | Line | Data |
---|---|---|
fb8e5b4c KO |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _BCACHEFS_SB_MEMBERS_H | |
3 | #define _BCACHEFS_SB_MEMBERS_H | |
4 | ||
037a2d9f | 5 | #include "darray.h" |
27c15ed2 | 6 | #include "bkey_types.h" |
037a2d9f | 7 | |
94119eeb KO |
8 | extern char * const bch2_member_error_strs[]; |
9 | ||
0f0fc312 KO |
10 | static inline struct bch_member * |
11 | __bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i) | |
12 | { | |
13 | return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes)); | |
14 | } | |
15 | ||
f5d26fa3 | 16 | int bch2_sb_members_v2_init(struct bch_fs *c); |
94119eeb | 17 | int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb); |
3f7b9713 | 18 | struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i); |
1241df58 HS |
19 | struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i); |
20 | ||
fb8e5b4c KO |
21 | static inline bool bch2_dev_is_online(struct bch_dev *ca) |
22 | { | |
23 | return !percpu_ref_is_zero(&ca->io_ref); | |
24 | } | |
25 | ||
26 | static inline bool bch2_dev_is_readable(struct bch_dev *ca) | |
27 | { | |
28 | return bch2_dev_is_online(ca) && | |
29 | ca->mi.state != BCH_MEMBER_STATE_failed; | |
30 | } | |
31 | ||
32 | static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw) | |
33 | { | |
34 | if (!percpu_ref_tryget(&ca->io_ref)) | |
35 | return false; | |
36 | ||
37 | if (ca->mi.state == BCH_MEMBER_STATE_rw || | |
38 | (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)) | |
39 | return true; | |
40 | ||
41 | percpu_ref_put(&ca->io_ref); | |
42 | return false; | |
43 | } | |
44 | ||
45 | static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs) | |
46 | { | |
47 | return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX); | |
48 | } | |
49 | ||
50 | static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs, | |
51 | unsigned dev) | |
52 | { | |
037a2d9f KO |
53 | darray_for_each(devs, i) |
54 | if (*i == dev) | |
fb8e5b4c | 55 | return true; |
fb8e5b4c KO |
56 | return false; |
57 | } | |
58 | ||
/*
 * Remove the first occurrence of @dev from @devs, if present.
 *
 * Removing while iterating is only safe because we return immediately
 * after darray_remove_item() — the iteration cursor is never reused.
 */
static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}
68 | ||
69 | static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs, | |
70 | unsigned dev) | |
71 | { | |
72 | if (!bch2_dev_list_has_dev(*devs, dev)) { | |
037a2d9f KO |
73 | BUG_ON(devs->nr >= ARRAY_SIZE(devs->data)); |
74 | devs->data[devs->nr++] = dev; | |
fb8e5b4c KO |
75 | } |
76 | } | |
77 | ||
78 | static inline struct bch_devs_list bch2_dev_list_single(unsigned dev) | |
79 | { | |
037a2d9f | 80 | return (struct bch_devs_list) { .nr = 1, .data[0] = dev }; |
fb8e5b4c KO |
81 | } |
82 | ||
41b84fb4 KO |
/*
 * Return the first live device at slot >= @idx, optionally restricted to
 * the slots set in @mask (NULL means consider every slot); returns NULL
 * when no such device remains.
 *
 * Caller must be in an RCU read-side critical section or hold
 * c->state_lock — that's exactly what the rcu_dereference_check() below
 * asserts.
 */
static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	/*
	 * Advance idx to the next candidate slot (next set bit in @mask, or
	 * idx itself when unmasked), stopping at the first slot whose
	 * c->devs[] pointer is non-NULL; empty slots are skipped via idx++.
	 */
	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}
97 | ||
41b84fb4 KO |
98 | static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca, |
99 | const struct bch_devs_mask *mask) | |
100 | { | |
101 | return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask); | |
102 | } | |
103 | ||
/*
 * Iterate over member devices matching @_mask (NULL for all slots).
 * Caller must be in an RCU read-side section or hold state_lock (see
 * __bch2_next_dev_idx()); no references are taken on the devices.
 */
#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)
fb8e5b4c | 107 | |
9fea2274 | 108 | static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca) |
fb8e5b4c | 109 | { |
9fea2274 KO |
110 | if (ca) |
111 | percpu_ref_put(&ca->ref); | |
fb8e5b4c KO |
112 | |
113 | rcu_read_lock(); | |
41b84fb4 | 114 | if ((ca = __bch2_next_dev(c, ca, NULL))) |
fb8e5b4c KO |
115 | percpu_ref_get(&ca->ref); |
116 | rcu_read_unlock(); | |
117 | ||
118 | return ca; | |
119 | } | |
120 | ||
/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

/* Iterate over all member devices, taking/dropping a ref on each: */
#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)
fb8e5b4c KO |
130 | |
131 | static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c, | |
9fea2274 KO |
132 | struct bch_dev *ca, |
133 | unsigned state_mask) | |
fb8e5b4c | 134 | { |
9fea2274 KO |
135 | if (ca) |
136 | percpu_ref_put(&ca->io_ref); | |
fb8e5b4c KO |
137 | |
138 | rcu_read_lock(); | |
41b84fb4 | 139 | while ((ca = __bch2_next_dev(c, ca, NULL)) && |
fb8e5b4c KO |
140 | (!((1 << ca->mi.state) & state_mask) || |
141 | !percpu_ref_tryget(&ca->io_ref))) | |
41b84fb4 | 142 | ; |
fb8e5b4c KO |
143 | rcu_read_unlock(); |
144 | ||
145 | return ca; | |
146 | } | |
147 | ||
9fea2274 KO |
/*
 * Iterate over online member devices whose state is in @state_mask,
 * holding an io_ref on the current device; if you break out early you
 * must drop that io_ref yourself (see bch2_get_next_online_dev()).
 */
#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

/* All online members, regardless of state: */
#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

/* Online members we may write to: */
#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

/* Online members we may read from (rw or ro): */
#define for_each_readable_member(c, ca)				\
	__for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
fb8e5b4c KO |
160 | |
/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	/* The literal 1 tells RCU this access is unconditionally allowed. */
	return rcu_dereference_check(c->devs[idx], 1);
}
171 | ||
/*
 * Dereference a device slot while holding a lock that pins the member
 * list: either c->sb_lock or c->state_lock, as asserted via lockdep
 * below.
 */
static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}
180 | ||
181 | /* XXX kill, move to struct bch_fs */ | |
182 | static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c) | |
183 | { | |
184 | struct bch_devs_mask devs; | |
fb8e5b4c KO |
185 | |
186 | memset(&devs, 0, sizeof(devs)); | |
9fea2274 | 187 | for_each_online_member(c, ca) |
fb8e5b4c KO |
188 | __set_bit(ca->dev_idx, devs.d); |
189 | return devs; | |
190 | } | |
191 | ||
9af26120 | 192 | extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1; |
3f7b9713 HS |
193 | extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2; |
194 | ||
94119eeb KO |
195 | static inline bool bch2_member_exists(struct bch_member *m) |
196 | { | |
197 | return !bch2_is_zero(&m->uuid, sizeof(m->uuid)); | |
198 | } | |
199 | ||
0f0fc312 | 200 | static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev) |
94119eeb KO |
201 | { |
202 | if (dev < sb->nr_devices) { | |
0f0fc312 | 203 | struct bch_member m = bch2_sb_member_get(sb, dev); |
94119eeb KO |
204 | return bch2_member_exists(&m); |
205 | } | |
206 | return false; | |
207 | } | |
208 | ||
209 | static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi) | |
210 | { | |
211 | return (struct bch_member_cpu) { | |
212 | .nbuckets = le64_to_cpu(mi->nbuckets), | |
213 | .first_bucket = le16_to_cpu(mi->first_bucket), | |
214 | .bucket_size = le16_to_cpu(mi->bucket_size), | |
215 | .group = BCH_MEMBER_GROUP(mi), | |
216 | .state = BCH_MEMBER_STATE(mi), | |
217 | .discard = BCH_MEMBER_DISCARD(mi), | |
218 | .data_allowed = BCH_MEMBER_DATA_ALLOWED(mi), | |
219 | .durability = BCH_MEMBER_DURABILITY(mi) | |
220 | ? BCH_MEMBER_DURABILITY(mi) - 1 | |
221 | : 1, | |
222 | .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi), | |
223 | .valid = bch2_member_exists(mi), | |
27c15ed2 KO |
224 | .btree_bitmap_shift = mi->btree_bitmap_shift, |
225 | .btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap), | |
94119eeb KO |
226 | }; |
227 | } | |
228 | ||
229 | void bch2_sb_members_from_cpu(struct bch_fs *); | |
230 | ||
231 | void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *); | |
232 | void bch2_dev_errors_reset(struct bch_dev *); | |
233 | ||
27c15ed2 KO |
234 | static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors) |
235 | { | |
236 | u64 end = start + sectors; | |
237 | ||
fa845c73 | 238 | if (end > 64ULL << ca->mi.btree_bitmap_shift) |
27c15ed2 KO |
239 | return false; |
240 | ||
fa845c73 KO |
241 | for (unsigned bit = start >> ca->mi.btree_bitmap_shift; |
242 | (u64) bit << ca->mi.btree_bitmap_shift < end; | |
27c15ed2 KO |
243 | bit++) |
244 | if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit))) | |
245 | return false; | |
246 | return true; | |
247 | } | |
248 | ||
249 | bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c); | |
250 | void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c); | |
251 | ||
fb8e5b4c | 252 | #endif /* _BCACHEFS_SB_MEMBERS_H */ |