#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H

#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "util.h" /* for time_stats */

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and
 * some ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bch_ptr_invalid()
 * and bch_ptr_bad().
 *
 * bch_ptr_invalid() primarily filters out keys and pointers that would be
 * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
 * pointers that occur in normal practice but don't point to real data.
 *
 * The one exception to the rule that ptr_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk - it's just unnecessary work - so we filter them out when
 * resorting instead.
 *
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing
 * overlapping extents, by removing the overlapping sectors from the older
 * key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted
 * order, along with a header. A btree node is made up of a number of these,
 * written at different times.
 *
 * There could be many of them on disk, but we never allow there to be more
 * than 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert
 * keys into the last (unwritten) set, most of the keys within a given btree
 * node are usually in sets that are mostly constant. We use two different
 * types of lookup tables to take advantage of this.
 *
 * Both lookup tables have in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear
 * search is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great-grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in
 * advance when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare
 * against (we don't want to fetch the key from the set, that would defeat
 * the purpose), and a pointer to the key. We use a few tricks to compress
 * both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in
 * the search tree corresponds to precisely BSET_CACHELINE bytes in the set.
 * We have a function (to_inorder(), sketched just after this comment) that
 * takes the index of a node in a binary tree and returns what its index
 * would be in an inorder traversal, so we only have to store the low bits of
 * the offset.
 *
 * The key is 84 bits (KEY_INODE + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing
 * the search tree at every iteration we know that both our search key and
 * the key we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true
 * even at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to check anything higher than bit
 * 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough
 * bits to partition the key range we're currently checking. Consider key n -
 * the key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 84 bits to do
 * the comparison. But we'd really like our nodes in the auxiliary search
 * tree to be of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a
 * node check if p and n differed in the bits we needed them to. If they
 * didn't we flag that node, and when doing lookups we fall back to comparing
 * against the real key. As long as this doesn't happen too often (and it
 * seems to reliably happen a bit less than 1% of the time), we win - even on
 * failures, that key is then more likely to be in cache than if we were
 * doing binary searches all the way, since we're touching so much less
 * memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big
 * enough to address all the bits in the original key, but the number of bits
 * in the mantissa is somewhat arbitrary; more bits just gets us fewer
 * failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since
 * keys are 8 byte aligned); using 22 bits for the mantissa means a node is 4
 * bytes (see the bkey_float sketch below). We need one node per 128 bytes in
 * the btree node, which means the auxiliary search trees take up 3% as much
 * memory as the btree itself.
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a
 * much simpler lookup table - it's just a flat array, so index i in the
 * lookup table corresponds to the i'th range of BSET_CACHELINE bytes in the
 * set. Indexing within each byte range works the same as with the auxiliary
 * search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the
 * pointer to it, and only when it would overflow do we go to the trouble of
 * finding the first key in that range of bytes again.
 */

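/*
 * Sketch only, for illustration - not part of the bcache API: the
 * breadth-first-to-inorder index mapping described above, for a *full*
 * binary tree with the root at index 1 (fls() comes from <linux/bitops.h>).
 * The real to_inorder() in bset.c also corrects for a partially filled last
 * level, using the precomputed t->extra.
 */
static inline unsigned to_inorder_sketch(unsigned j, unsigned depth)
{
        unsigned level = fls(j) - 1;    /* root is at level 0 */

        /*
         * A node's inorder rank (1-based) is its path bits (j with the
         * leading set bit stripped), followed by a 1, padded with zeroes
         * down to the bottom of the tree.
         */
        j ^= 1U << level;
        return ((j << 1) | 1) << (depth - level - 1);
}
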
struct btree_keys;
struct btree_iter;
struct btree_iter_set;
struct bkey_float;

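/*
 * Sketch only: the 4 byte auxiliary search tree node implied by the comment
 * above - 7 exponent bits, 3 bits of key offset, 22 mantissa bits. The real
 * struct bkey_float is defined in bset.c; the names below are hypothetical.
 */
struct bkey_float_sketch {
        unsigned        exponent:7;     /* bit the mantissa was taken from */
        unsigned        m:3;            /* key's offset within its cacheline */
        unsigned        mantissa:22;    /* key bits starting at 'exponent' */
};

/*
 * Per the comment above, the lowest bit worth storing for key n is the
 * highest bit that differs between n and the preceding key p:
 */
static inline unsigned bkey_float_exponent_sketch(uint64_t n, uint64_t p)
{
        return n == p ? 0 : fls64(n ^ p) - 1;   /* highest differing bit */
}
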
#define MAX_BSETS               4U

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array started at
         * 1, so that things line up on the same cachelines better: see
         * comments in bset.c at cacheline_to_bkey() for details
         */

        /* size of the binary tree and prev array */
        unsigned                size;

        /* function of size - precalculated for to_inorder() */
        unsigned                extra;

        /* copy of the last key in the set */
        struct bkey             end;
        struct bkey_float       *tree;

        /*
         * The nodes in the bset tree point to specific keys - this array
         * holds the sizes of the previous key.
         *
         * Conceptually it's a member of struct bkey_float, but we want to
         * keep bkey_float to 4 bytes and prev isn't used in the fast path.
         */
        uint8_t                 *prev;

        /* The actual btree node, with pointers to each sorted set */
        struct bset             *data;
};

struct btree_keys_ops {
        bool            (*sort_cmp)(struct btree_iter_set,
                                    struct btree_iter_set);
        struct bkey     *(*sort_fixup)(struct btree_iter *, struct bkey *);
        bool            (*insert_fixup)(struct btree_keys *, struct bkey *,
                                        struct btree_iter *, struct bkey *);
        bool            (*key_invalid)(struct btree_keys *,
                                       const struct bkey *);
        bool            (*key_bad)(struct btree_keys *, const struct bkey *);
        bool            (*key_merge)(struct btree_keys *,
                                     struct bkey *, struct bkey *);
        void            (*key_to_text)(char *, size_t, const struct bkey *);
        void            (*key_dump)(struct btree_keys *, const struct bkey *);

        /*
         * Only used for deciding whether to use START_KEY(k) or just the key
         * itself in a couple places
         */
        bool            is_extents;
};

struct btree_keys {
        const struct btree_keys_ops     *ops;
        uint8_t                 page_order;
        uint8_t                 nsets;
        unsigned                last_set_unwritten:1;
        bool                    *expensive_debug_checks;

        /*
         * Sets of sorted keys - the real btree node - plus a binary search
         * tree
         *
         * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data
         * point to the memory we have allocated for this btree node.
         * Additionally, set[0]->data points to the entire btree node as it
         * exists on disk.
         */
        struct bset_tree        set[MAX_BSETS];
};

static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
{
        return b->set + b->nsets;
}

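/*
 * A set is "written" once it's on disk; only the last set can still be
 * accepting inserts, so when last_set_unwritten is set that last set is
 * excluded from the comparison below.
 */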
static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
{
        return t <= b->set + b->nsets - b->last_set_unwritten;
}

static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
{
        return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}

static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
{
        return ((size_t) i) - ((size_t) b->set->data);
}

static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
{
        return bset_byte_offset(b, i) >> 9;
}

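/*
 * Note that 'k' here, like bset->keys, counts u64s of key data rather than
 * bkeys - bkeys are variable length.
 */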
#define __set_bytes(i, k)       (sizeof(*(i)) + (k) * sizeof(uint64_t))
#define set_bytes(i)            __set_bytes(i, (i)->keys)

#define __set_blocks(i, k, block_bytes)                         \
        DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
#define set_blocks(i, block_bytes)                              \
        __set_blocks(i, (i)->keys, block_bytes)

static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON((PAGE_SIZE << b->page_order) <
               (bset_byte_offset(b, t->data) + set_bytes(t->data)));

        if (!b->last_set_unwritten)
                return 0;

        return ((PAGE_SIZE << b->page_order) -
                (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
                sizeof(u64);
}

static inline struct bset *bset_next_set(struct btree_keys *b,
                                         unsigned block_bytes)
{
        struct bset *i = bset_tree_last(b)->data;

        return ((void *) i) + roundup(set_bytes(i), block_bytes);
}

void bch_btree_keys_free(struct btree_keys *);
int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
                         bool *);

void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
void bch_bset_build_written_tree(struct btree_keys *);
void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
                              struct bkey *);

enum {
        BTREE_INSERT_STATUS_NO_INSERT = 0,
        BTREE_INSERT_STATUS_INSERT,
        BTREE_INSERT_STATUS_BACK_MERGE,
        BTREE_INSERT_STATUS_OVERWROTE,
        BTREE_INSERT_STATUS_FRONT_MERGE,
};
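
/*
 * Sketch only, with hypothetical ops/magic values - not code from the tree:
 * the rough lifecycle implied by the API above. Allocate, init, open the
 * first (unwritten) set, then insert; in real code 'ops' and 'magic' come
 * from the cache set.
 */
static inline int btree_keys_usage_sketch(struct btree_keys *b,
                                          const struct btree_keys_ops *ops,
                                          struct bkey *k, uint64_t magic)
{
        static bool no_debug_checks;    /* expensive_debug_checks flag */
        int ret = bch_btree_keys_alloc(b, 0, GFP_KERNEL);       /* one page */

        if (ret)
                return ret;

        bch_btree_keys_init(b, ops, &no_debug_checks);
        bch_bset_init_next(b, b->set->data, magic);     /* open first set */

        if (bch_btree_insert_key(b, k, NULL) == BTREE_INSERT_STATUS_NO_INSERT)
                ret = -EINVAL;

        bch_btree_keys_free(b);
        return ret;
}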

/* Btree key iteration */

struct btree_iter {
        size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
        struct btree_keys *b;
#endif
        struct btree_iter_set {
                struct bkey *k, *end;
        } data[MAX_BSETS];
};

typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);

struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
                                        struct btree_keys *, ptr_filter_fn);

void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
                                 struct bkey *);

struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
                               const struct bkey *);

/*
 * Returns the first key that is strictly greater than search
 */
static inline struct bkey *bch_bset_search(struct btree_keys *b,
                                           struct bset_tree *t,
                                           const struct bkey *search)
{
        return search ? __bch_bset_search(b, t, search) : t->data->start;
}

#define for_each_key_filter(b, k, iter, filter)                         \
        for (bch_btree_iter_init((b), (iter), NULL);                    \
             ((k) = bch_btree_iter_next_filter((iter), (b), filter));)

#define for_each_key(b, k, iter)                                        \
        for (bch_btree_iter_init((b), (iter), NULL);                    \
             ((k) = bch_btree_iter_next(iter));)

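/*
 * Example (sketch, hypothetical helper): for_each_key_filter() skips keys
 * the filter returns true for - e.g. pass bch_ptr_bad to walk only keys
 * that point at real data.
 */
static inline unsigned count_keys_sketch(struct btree_keys *b,
                                         struct btree_iter *iter,
                                         ptr_filter_fn filter)
{
        struct bkey *k;
        unsigned ret = 0;

        for_each_key_filter(b, k, iter, filter)
                ret++;

        return ret;
}
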
/* Sorting */

struct bset_sort_state {
        mempool_t               *pool;

        unsigned                page_order;
        unsigned                crit_factor;

        struct time_stats       time;
};

void bch_bset_sort_state_free(struct bset_sort_state *);
int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
                         struct bset_sort_state *);
void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
                                    struct bset_sort_state *);
void bch_btree_sort_partial(struct btree_keys *, unsigned,
                            struct bset_sort_state *);

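/*
 * bch_btree_sort_partial() resorts the sets from the given index onwards
 * into a single set; bch_btree_sort() is the start == 0 case, rewriting the
 * whole node as one set.
 */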
static inline void bch_btree_sort(struct btree_keys *b,
                                  struct bset_sort_state *state)
{
        bch_btree_sort_partial(b, 0, state);
}

struct bset_stats {
        size_t sets_written, sets_unwritten;
        size_t bytes_written, bytes_unwritten;
        size_t floats, failed;
};

void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);

/* Bkey utility code */

#define bset_bkey_last(i)       bkey_idx((struct bkey *) (i)->d, (i)->keys)

static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
{
        return bkey_idx(i->start, idx);
}

static inline void bkey_init(struct bkey *k)
{
        *k = ZERO_KEY;
}

static __always_inline int64_t bkey_cmp(const struct bkey *l,
                                        const struct bkey *r)
{
        return unlikely(KEY_INODE(l) != KEY_INODE(r))
                ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
                : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}

void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
                              unsigned);
bool __bch_cut_front(const struct bkey *, struct bkey *);
bool __bch_cut_back(const struct bkey *, struct bkey *);

static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
        BUG_ON(bkey_cmp(where, k) > 0);
        return __bch_cut_front(where, k);
}

static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
        BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
        return __bch_cut_back(where, k);
}
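
/*
 * Example (sketch, assuming two extent keys where 'newer' starts inside
 * 'older'): fixing an overlap as described at the top of this file, by
 * cutting the overlapping sectors off the back of the older key.
 */
static inline void fix_overlap_sketch(struct bkey *older, struct bkey *newer)
{
        if (bkey_cmp(older, &START_KEY(newer)) > 0)
                bch_cut_back(&START_KEY(newer), older);
}
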
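/*
 * PRECEDING_KEY() evaluates to a key that sorts immediately before _k, or
 * NULL if _k is the zero key. Note that the result points at a compound
 * literal on the caller's stack, so only use it within the statement it was
 * built in (e.g. directly as the search key for bch_btree_iter_init()).
 */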
#define PRECEDING_KEY(_k)                                       \
({                                                              \
        struct bkey *_ret = NULL;                               \
                                                                \
        if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
                _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
                                                                \
                if (!_ret->low)                                 \
                        _ret->high--;                           \
                _ret->low--;                                    \
        }                                                       \
                                                                \
        _ret;                                                   \
})

static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
        return b->ops->key_invalid(b, k);
}

static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
        return b->ops->key_bad(b, k);
}

static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
                                    size_t size, const struct bkey *k)
{
        b->ops->key_to_text(buf, size, k);
}

static inline bool bch_bkey_equal_header(const struct bkey *l,
                                         const struct bkey *r)
{
        return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
                KEY_PTRS(l) == KEY_PTRS(r) &&
                KEY_CSUM(l) == KEY_CSUM(r));
}

/* Keylists */

struct keylist {
        union {
                struct bkey             *keys;
                uint64_t                *keys_p;
        };
        union {
                struct bkey             *top;
                uint64_t                *top_p;
        };

        /* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE          16
        uint64_t                inline_keys[KEYLIST_INLINE];
};

static inline void bch_keylist_init(struct keylist *l)
{
        l->top_p = l->keys_p = l->inline_keys;
}

static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
{
        l->keys = k;
        l->top = bkey_next(k);
}

static inline void bch_keylist_push(struct keylist *l)
{
        l->top = bkey_next(l->top);
}

static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
        bkey_copy(l->top, k);
        bch_keylist_push(l);
}

static inline bool bch_keylist_empty(struct keylist *l)
{
        return l->top == l->keys;
}

static inline void bch_keylist_reset(struct keylist *l)
{
        l->top = l->keys;
}

static inline void bch_keylist_free(struct keylist *l)
{
        if (l->keys_p != l->inline_keys)
                kfree(l->keys_p);
}

static inline size_t bch_keylist_nkeys(struct keylist *l)
{
        return l->top_p - l->keys_p;
}

static inline size_t bch_keylist_bytes(struct keylist *l)
{
        return bch_keylist_nkeys(l) * sizeof(uint64_t);
}

struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
int __bch_keylist_realloc(struct keylist *, unsigned);

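/*
 * Example (sketch, with a hypothetical consume_key() callback): keylists
 * drain front to back, with l->keys always pointing at the oldest remaining
 * key; bch_keylist_free() releases any heap buffer from
 * __bch_keylist_realloc().
 */
static inline void drain_keylist_sketch(struct keylist *l,
                                        void (*consume_key)(struct bkey *))
{
        while (!bch_keylist_empty(l)) {
                consume_key(l->keys);
                bch_keylist_pop_front(l);
        }

        bch_keylist_free(l);
}
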
/* Debug stuff */

#ifdef CONFIG_BCACHE_DEBUG

int __bch_count_data(struct btree_keys *);
void __bch_check_keys(struct btree_keys *, const char *, ...);
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
void bch_dump_bucket(struct btree_keys *);

#else

static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __bch_check_keys(struct btree_keys *b,
                                    const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);

#endif

static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
#ifdef CONFIG_BCACHE_DEBUG
        return *b->expensive_debug_checks;
#else
        return false;
#endif
}

static inline int bch_count_data(struct btree_keys *b)
{
        return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
}

#define bch_check_keys(b, ...)                                          \
do {                                                                    \
        if (btree_keys_expensive_checks(b))                             \
                __bch_check_keys(b, __VA_ARGS__);                       \
} while (0)

#endif