// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
	struct bkey *k, *next;

	for (k = i->start; k < bset_bkey_last(i); k = next) {
		next = bkey_next(k);

		pr_err("block %u key %u/%u: ", set,
		       (unsigned int) ((u64 *) k - i->d), i->keys);

		if (b->ops->key_dump)
			b->ops->key_dump(b, k);
		else
			pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

		if (next < bset_bkey_last(i) &&
		    bkey_cmp(k, b->ops->is_extents ?
			     &START_KEY(next) : next) > 0)
			pr_err("Key skipped backwards\n");
	}
}

void bch_dump_bucket(struct btree_keys *b)
{
	unsigned int i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
		bch_dump_bset(b, b->set[i].data,
			      bset_sector_offset(b, b->set[i].data));
	console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
	unsigned int ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (b->ops->is_extents)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(b, k, &iter) {
		if (b->ops->is_extents) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(b, k))
				continue;

			err = "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(b, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}
#if 0
	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;
#endif
	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bch_check_keys error: %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->ops->is_extents ?
		     &START_KEY(next) : next) > 0) {
		bch_dump_bucket(iter->b);
		panic("Key skipped backwards\n");
	}
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;
	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
	uint64_t *new_keys;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new_keys)
		return -ENOMEM;

	if (!old_keys)
		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

	l->keys_p = new_keys;
	l->top_p = new_keys + oldsize;

	return 0;
}

/* Pop the top key of keylist by pointing l->top to its previous key */
struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->keys;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}

/* Pop the bottom key of keylist and update l->top_p */
void bch_keylist_pop_front(struct keylist *l)
{
	l->top_p -= bkey_u64s(l->keys);

	memmove(l->keys,
		bkey_next(l->keys),
		bch_keylist_bytes(l));
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned int i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned int i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned int len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

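/*
 * Worked example (illustrative, not part of the original source): take an
 * extent key k covering [10, 20), i.e. KEY_OFFSET(k) == 20 and
 * KEY_SIZE(k) == 10, and a cut point 'where' with the same KEY_INODE and
 * KEY_OFFSET(where) == 14:
 *
 *	__bch_cut_front(where, k): len = 20 - 14 = 6, every PTR_OFFSET
 *	advances by 10 - 6 = 4, and k now covers [14, 20).
 *
 *	__bch_cut_back(where, k) on the original key: len = 14 - 10 = 4,
 *	k's end moves back to 14, and k now covers [10, 14).
 */
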
/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned int	exponent:BKEY_EXPONENT_BITS;
	unsigned int	m:BKEY_MID_BITS;
	unsigned int	mantissa:BKEY_MANTISSA_BITS;
} __packed;

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline
 * size - it used to be 64, but I realized the lookup code would touch
 * slightly less memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree
 * we have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE		128

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
	return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
	return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(uint8_t);
}

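/*
 * Worked example (illustrative): assuming PAGE_SIZE == 4096 and
 * page_order == 0, btree_keys_bytes() == 4096, so a node has
 * btree_keys_cachelines() == 4096 / 128 == 32 "cachelines".
 * sizeof(struct bkey_float) == 4 (32 bits, __packed), so
 * bset_tree_bytes() == 128 and bset_prev_bytes() == 32 - both under
 * PAGE_SIZE, so bch_btree_keys_alloc() below gets them from kmalloc().
 */
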
/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
	struct bset_tree *t = b->set;

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
}

int bch_btree_keys_alloc(struct btree_keys *b,
			 unsigned int page_order,
			 gfp_t gfp)
{
	struct bset_tree *t = b->set;

	BUG_ON(t->data);

	b->page_order = page_order;

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	return 0;
err:
	bch_btree_keys_free(b);
	return -ENOMEM;
}

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks)
{
	b->ops = ops;
	b->expensive_debug_checks = expensive_debug_checks;
	b->nsets = 0;
	b->last_set_unwritten = 0;

	/*
	 * struct btree_keys is embedded in struct btree, and struct
	 * bset_tree is embedded into struct btree_keys. They are all
	 * initialized as 0 by kzalloc() in mca_bucket_alloc(), and
	 * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
	 * don't have to initialize b->set[].size and b->set[].data here
	 * any more.
	 */
}

/* Binary tree stuff for auxiliary search trees */

/*
 * Return the array index that follows j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_next(unsigned int j, unsigned int size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

/*
 * Return the array index that precedes j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}

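/*
 * Worked example (illustrative): for size == 8 the tree has nodes 1..7
 * laid out as a heap (children of j are 2j and 2j + 1), and an in-order
 * traversal visits 4, 2, 5, 1, 6, 3, 7. So:
 *
 *	inorder_next(0, 8) == 4	(leftmost node)
 *	inorder_next(4, 8) == 2
 *	inorder_next(2, 8) == 5
 *	inorder_prev(2, 8) == 4
 */
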
/*
 * I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally
 * implement a heap), it converts a node in the tree - referenced by array
 * index - to the index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for sizes up to around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned int __to_inorder(unsigned int j,
				 unsigned int size,
				 unsigned int extra)
{
	unsigned int b = fls(j);
	unsigned int shift = fls(size - 1) - b;

	j  ^= 1U << (b - 1);
	j <<= 1;
	j  |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

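/*
 * Worked example (illustrative): size == 6 gives nodes 1..5 and
 * extra == (6 - rounddown_pow_of_two(5)) << 1 == 4. In-order traversal
 * of the 5-node heap visits 4, 2, 5, 1, 3, and __to_inorder() agrees:
 *
 *	__to_inorder(4, 6, 4) == 1
 *	__to_inorder(2, 6, 4) == 2
 *	__to_inorder(5, 6, 4) == 3
 *	__to_inorder(1, 6, 4) == 4
 *	__to_inorder(3, 6, 4) == 5	(intermediate value 6 > extra,
 *					 so the final adjustment fires)
 */
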
/*
 * Return the cacheline index in bset_tree->data, where j is the index
 * into the linear array that stores the auxiliary binary tree
 */
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned int __inorder_to_tree(unsigned int j,
				      unsigned int size,
				      unsigned int extra)
{
	unsigned int shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j |= roundup_pow_of_two(size) >> shift;

	return j;
}

/*
 * Return an index into the linear array that stores the auxiliary binary
 * tree, where j is the cacheline index into t->data.
 */
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned int size = 2;
	     size < 65536000;
	     size++) {
		unsigned int extra =
			(size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned int i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			pr_notice("loop %u, %llu per us\n", size,
				  done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t,
				      unsigned int cacheline,
				      unsigned int offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
					     unsigned int cacheline,
					     struct bkey *k)
{
	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

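/*
 * Worked example (illustrative): cacheline_to_bkey(t, 3, 2) points
 * 3 * BSET_CACHELINE + 2 * 8 == 400 bytes past t->data; for a bkey k at
 * that address, bkey_to_cacheline(t, k) == 3 and
 * bkey_to_cacheline_offset(t, 3, k) == 2. tree_to_bkey(t, j) is the same
 * arithmetic driven by to_inorder(j, t) and t->tree[j].m.
 */
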
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}

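/*
 * shrd128() is a 128-bit funnel shift right: it returns bits
 * [shift, shift + 63] of the 128-bit value (high << 64) | low.
 * Illustrative: shrd128(0x1, 0x0, 60) == 0x10, since shifting 1 << 64
 * right by 60 leaves 1 << 4. The (high << 1) << (63U - shift) dance
 * avoids the undefined 64-bit shift that high << (64 - shift) would
 * perform when shift == 0.
 */
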
/*
 * Calculate the mantissa value for struct bkey_float.
 * If the most significant bit of f->exponent is not set, then
 *  - f->exponent >> 6 is 0
 *  - p[0] points to bkey->low
 *  - p[-1] borrows bits from KEY_INODE() of bkey->high
 * If the most significant bit of f->exponent is set, then
 *  - f->exponent >> 6 is 1
 *  - p[0] points to bits from KEY_INODE() of bkey->high
 *  - p[-1] points to other bits from KEY_INODE() of
 *    bkey->high too.
 * See make_bfloat() to check when the most significant bit of f->exponent
 * is set or not.
 */
static inline unsigned int bfloat_mantissa(const struct bkey *k,
					   struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);

	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned int j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	/*
	 * If l and r have different KEY_INODE values (different backing
	 * devices), f->exponent records how many least significant bits
	 * are different in KEY_INODE values and sets the most significant
	 * bits to 1 (by +64).
	 * If l and r have the same KEY_INODE value, f->exponent records
	 * how many bits differ in the least significant bits of bkey->low.
	 * See bfloat_mantissa() for how the most significant bit of
	 * f->exponent is used to calculate the bfloat mantissa value.
	 */
	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}

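/*
 * Worked example (illustrative): suppose l and r share KEY_INODE, with
 * l->low == 0 and r->low == 1ULL << 40. Then fls64(r->low ^ l->low) == 41
 * and f->exponent == 41 - BKEY_MANTISSA_BITS == 19, so the mantissa
 * compares bits [19, 40] of bkey->low - exactly the bits in which keys
 * between l and r can differ. If m and its predecessor p collide in those
 * 22 bits, the node is flagged as failed (f->exponent == 127) instead.
 */
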
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned int j = roundup(t[-1].size,
					 64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->set + MAX_BSETS)
		t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(b->last_set_unwritten);
	b->last_set_unwritten = 1;

	bset_alloc_tree(b, t);

	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
		t->size = 1;
	}
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
	if (i != b->set->data) {
		b->set[++b->nsets].data = i;
		i->seq = b->set->data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic = magic;
	i->version = 0;
	i->keys = 0;

	bch_bset_build_unwritten_tree(b);
}

/*
 * Build the auxiliary binary tree for 'struct bset_tree *t'; this tree is
 * used to accelerate bkey searches in a btree node (pointed to by
 * bset_tree->data in memory). After searching the auxiliary tree via
 * bset_search_tree(), a struct bset_search_iter is returned which indicates
 * the range [l, r] in bset_tree->data where the bkey being searched for
 * might be. A linear comparison then does the exact search; see
 * __bch_bset_search() for how the auxiliary tree is used.
 */
void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned int j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned int,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) < cacheline)
			prev = k, k = bkey_next(k);

		t->prev[j] = bkey_u64s(prev);
		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
	}

	while (bkey_next(k) != bset_bkey_last(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned int inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == bset_bkey_last(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}

static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned int shift = bkey_u64s(k);
	unsigned int j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/*
	 * k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	while (j < t->size &&
	       table_to_bkey(t, j) <= k)
		j++;

	/*
	 * Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
		}
	}

	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != bset_bkey_last(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] =
				bkey_to_cacheline_offset(t, t->size, k);
			t->size++;
		}
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
	if (!b->ops->key_merge)
		return false;

	/*
	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	 */
	if (!bch_bkey_equal_header(l, r) ||
	     bkey_cmp(l, &START_KEY(r)))
		return false;

	return b->ops->key_merge(b, l, r);
}

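/*
 * Illustrative example: two extents l == [0, 8) and r == [8, 16) with
 * identical headers pass the checks in bch_bkey_try_merge() -
 * bkey_cmp(l, &START_KEY(r)) is 0 because l ends exactly where r starts -
 * and, if the pointers line up too, ops->key_merge() turns l into [0, 16).
 */
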
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(!b->last_set_unwritten);
	BUG_ON(bset_byte_offset(b, t->data) +
	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
	       PAGE_SIZE << b->page_order);

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) bset_bkey_last(t->data) - (void *) where);

	t->data->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, t, where);
}

unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
				  struct bkey *replace_key)
{
	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;
	struct bkey preceding_key_on_stack = ZERO_KEY;
	struct bkey *preceding_key_p = &preceding_key_on_stack;

	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

	/*
	 * If k has a preceding key, preceding_key_p will be set to the
	 * address of k's preceding key; otherwise preceding_key_p will be
	 * set to NULL inside preceding_key().
	 */
	if (b->ops->is_extents)
		preceding_key(&START_KEY(k), &preceding_key_p);
	else
		preceding_key(k, &preceding_key_p);

	m = bch_btree_iter_init(b, &iter, preceding_key_p);

	if (b->ops->insert_fixup(b, k, &iter, replace_key))
		return status;

	status = BTREE_INSERT_STATUS_INSERT;

	while (m != bset_bkey_last(i) &&
	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
		prev = m, m = bkey_next(m);

	/* prev is in the tree, if we merge we're done */
	status = BTREE_INSERT_STATUS_BACK_MERGE;
	if (prev &&
	    bch_bkey_try_merge(b, prev, k))
		goto merged;
#if 0
	status = BTREE_INSERT_STATUS_OVERWROTE;
	if (m != bset_bkey_last(i) &&
	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
		goto copy;
#endif
	status = BTREE_INSERT_STATUS_FRONT_MERGE;
	if (m != bset_bkey_last(i) &&
	    bch_bkey_try_merge(b, k, m))
		goto copy;

	bch_bset_insert(b, m, k);
copy:	bkey_copy(m, k);
merged:
	return status;
}

/* Lookup */

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned int li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned int m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned int inorder, j, n = 1;

	do {
		unsigned int p = n << 4;

		if (p < t->size)
			prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		if (likely(f->exponent != 127)) {
			if (f->mantissa >= bfloat_mantissa(search, f))
				n = j * 2;
			else
				n = j * 2 + 1;
		} else {
			if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				n = j * 2;
			else
				n = j * 2 + 1;
		}
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = bset_bkey_last(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a
	 *    linear search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search -
	 *    bset_search_write_set().
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = bset_bkey_last(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return bset_bkey_last(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(t, search);
	} else {
		BUG_ON(!b->nsets &&
		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

		i = bset_search_write_set(t, search);
	}

	if (btree_keys_expensive_checks(b)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != bset_bkey_last(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
				 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
					  struct btree_iter *iter,
					  struct bkey *search,
					  struct bset_tree *start)
{
	struct bkey *ret = NULL;

	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= bset_tree_last(b); start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->set);
}

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
						 btree_iter_cmp_fn *cmp)
{
	struct btree_iter_set b __maybe_unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, b, cmp);
		else
			heap_sift(iter, 0, cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	return __bch_btree_iter_next(iter, btree_iter_cmp);
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

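/*
 * Illustrative use (roughly what the for_each_key() macro in bset.h
 * expands to):
 *
 *	struct btree_iter iter;
 *	struct bkey *k;
 *
 *	bch_btree_iter_init(b, &iter, NULL);
 *	while ((k = bch_btree_iter_next(&iter)))
 *		... keys from all bsets arrive in sorted order ...
 */
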
/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
	mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order)
{
	spin_lock_init(&state->time.lock);

	state->page_order = page_order;
	state->crit_factor = int_sqrt(1 << page_order);

	return mempool_init_page_pool(&state->pool, 1, page_order);
}

static void btree_mergesort(struct btree_keys *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	int i;
	struct bkey *k, *last = NULL;
	BKEY_PADDED(k) tmp;
	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	/* Heapify the iterator, using our comparison function */
	for (i = iter->used / 2 - 1; i >= 0; --i)
		heap_sift(iter, i, b->ops->sort_cmp);

	while (!btree_iter_end(iter)) {
		if (b->ops->sort_fixup && fixup)
			k = b->ops->sort_fixup(iter, &tmp.k);
		else
			k = NULL;

		if (!k)
			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (!bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys\n", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned int start, unsigned int order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
	bool used_mempool = false;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
						     order);
	if (!out) {
		struct page *outp;

		BUG_ON(order > state->page_order);

		outp = mempool_alloc(&state->pool, GFP_NOIO);
		out = page_address(outp);
		used_mempool = true;
		order = state->page_order;
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, false);
	b->nsets = start;

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 *
		 * Don't worry even if 'out' is allocated from the mempool, it
		 * can still be swapped here. Because state->pool is a page
		 * mempool created by mempool_init_page_pool(), which allocates
		 * pages by alloc_pages() indeed.
		 */

		out->magic = b->set->data->magic;
		out->seq = b->set->data->seq;
		out->version = b->set->data->version;
		swap(out, b->set->data);
	} else {
		b->set[start].data->keys = out->keys;
		memcpy(b->set[start].data->start, out->start,
		       (void *) bset_bkey_last(out) - (void *) out->start);
	}

	if (used_mempool)
		mempool_free(virt_to_page(out), &state->pool);
	else
		free_pages((unsigned long) out, order);

	bch_bset_build_written_tree(b);

	if (!start)
		bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned int i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;

		order = get_order(__set_bytes(b->set->data, keys));
	}

	__btree_sort(b, &iter, start, order, false, state);

	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state)
{
	__btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state)
{
	uint64_t start_time = local_clock();
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->set->data, &iter, false, true);

	bch_time_stats_update(&state->time, start_time);

	new->set->size = 0; // XXX: why?
}

#define SORT_CRIT	(4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned int crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= state->crit_factor;

		if (b->set[i].data->keys < crit) {
			bch_btree_sort_partial(b, i, state);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b, state);
		return;
	}

out:
	bch_bset_build_written_tree(b);
}

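/*
 * Worked example (illustrative): SORT_CRIT is 512 u64s, and with
 * page_order == 3 we get crit_factor == int_sqrt(8) == 2. The scan
 * starts at the last written set (i == b->nsets - 1): it is merged with
 * everything newer if it holds fewer than 1024 u64s, the next older set
 * triggers at 2048, and so on - small recently written sets get merged
 * eagerly while large old ones are left alone.
 */
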
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned int i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}
}