/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */
7 | ||
89ebb4a2 KO |
8 | #define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ |
9 | ||
10 | #include "util.h" | |
11 | #include "bset.h" | |
cafe5635 | 12 | |
dc9d98d6 | 13 | #include <linux/console.h> |
cafe5635 | 14 | #include <linux/random.h> |
cd953ed0 | 15 | #include <linux/prefetch.h> |
cafe5635 | 16 | |
#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
{
	struct bkey *k, *next;

	for (k = i->start; k < bset_bkey_last(i); k = next) {
		next = bkey_next(k);

		printk(KERN_ERR "block %u key %u/%u: ", set,
		       (unsigned) ((u64 *) k - i->d), i->keys);

		if (b->ops->key_dump)
			b->ops->key_dump(b, k);
		else
			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

		if (next < bset_bkey_last(i) &&
		    bkey_cmp(k, b->ops->is_extents ?
			     &START_KEY(next) : next) > 0)
			printk(KERN_ERR "Key skipped backwards\n");
	}
}

void bch_dump_bucket(struct btree_keys *b)
{
	unsigned i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
		bch_dump_bset(b, b->set[i].data,
			      bset_sector_offset(b, b->set[i].data));
	console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
	unsigned ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (b->ops->is_extents)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(b, k, &iter) {
		if (b->ops->is_extents) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(b, k))
				continue;

			err = "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(b, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}
#if 0
	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;
#endif
	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bch_check_keys error: %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->ops->is_extents ?
		     &START_KEY(next) : next) > 0) {
		bch_dump_bucket(iter->b);
		panic("Key skipped backwards\n");
	}
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif
126 | ||
cafe5635 KO |
127 | /* Keylists */ |
128 | ||
085d2a3d | 129 | int __bch_keylist_realloc(struct keylist *l, unsigned u64s) |
cafe5635 | 130 | { |
c2f95ae2 | 131 | size_t oldsize = bch_keylist_nkeys(l); |
085d2a3d | 132 | size_t newsize = oldsize + u64s; |
c2f95ae2 KO |
133 | uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p; |
134 | uint64_t *new_keys; | |
cafe5635 | 135 | |
cafe5635 KO |
136 | newsize = roundup_pow_of_two(newsize); |
137 | ||
138 | if (newsize <= KEYLIST_INLINE || | |
139 | roundup_pow_of_two(oldsize) == newsize) | |
140 | return 0; | |
141 | ||
c2f95ae2 | 142 | new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO); |
cafe5635 | 143 | |
c2f95ae2 | 144 | if (!new_keys) |
cafe5635 KO |
145 | return -ENOMEM; |
146 | ||
c2f95ae2 KO |
147 | if (!old_keys) |
148 | memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize); | |
cafe5635 | 149 | |
c2f95ae2 KO |
150 | l->keys_p = new_keys; |
151 | l->top_p = new_keys + oldsize; | |
cafe5635 KO |
152 | |
153 | return 0; | |
154 | } | |
155 | ||
156 | struct bkey *bch_keylist_pop(struct keylist *l) | |
157 | { | |
c2f95ae2 | 158 | struct bkey *k = l->keys; |
cafe5635 KO |
159 | |
160 | if (k == l->top) | |
161 | return NULL; | |
162 | ||
163 | while (bkey_next(k) != l->top) | |
164 | k = bkey_next(k); | |
165 | ||
166 | return l->top = k; | |
167 | } | |
168 | ||
26c949f8 KO |
169 | void bch_keylist_pop_front(struct keylist *l) |
170 | { | |
c2f95ae2 | 171 | l->top_p -= bkey_u64s(l->keys); |
26c949f8 | 172 | |
c2f95ae2 KO |
173 | memmove(l->keys, |
174 | bkey_next(l->keys), | |
175 | bch_keylist_bytes(l)); | |
26c949f8 KO |
176 | } |
177 | ||
cafe5635 KO |
178 | /* Key/pointer manipulation */ |
179 | ||
180 | void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src, | |
181 | unsigned i) | |
182 | { | |
183 | BUG_ON(i > KEY_PTRS(src)); | |
184 | ||
185 | /* Only copy the header, key, and one pointer. */ | |
186 | memcpy(dest, src, 2 * sizeof(uint64_t)); | |
187 | dest->ptr[0] = src->ptr[i]; | |
188 | SET_KEY_PTRS(dest, 1); | |
189 | /* We didn't copy the checksum so clear that bit. */ | |
190 | SET_KEY_CSUM(dest, 0); | |
191 | } | |
192 | ||
193 | bool __bch_cut_front(const struct bkey *where, struct bkey *k) | |
194 | { | |
195 | unsigned i, len = 0; | |
196 | ||
197 | if (bkey_cmp(where, &START_KEY(k)) <= 0) | |
198 | return false; | |
199 | ||
200 | if (bkey_cmp(where, k) < 0) | |
201 | len = KEY_OFFSET(k) - KEY_OFFSET(where); | |
202 | else | |
203 | bkey_copy_key(k, where); | |
204 | ||
205 | for (i = 0; i < KEY_PTRS(k); i++) | |
206 | SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len); | |
207 | ||
208 | BUG_ON(len > KEY_SIZE(k)); | |
209 | SET_KEY_SIZE(k, len); | |
210 | return true; | |
211 | } | |
212 | ||
213 | bool __bch_cut_back(const struct bkey *where, struct bkey *k) | |
214 | { | |
215 | unsigned len = 0; | |
216 | ||
217 | if (bkey_cmp(where, k) >= 0) | |
218 | return false; | |
219 | ||
220 | BUG_ON(KEY_INODE(where) != KEY_INODE(k)); | |
221 | ||
222 | if (bkey_cmp(where, &START_KEY(k)) > 0) | |
223 | len = KEY_OFFSET(where) - KEY_START(k); | |
224 | ||
225 | bkey_copy_key(k, where); | |
226 | ||
227 | BUG_ON(len > KEY_SIZE(k)); | |
228 | SET_KEY_SIZE(k, len); | |
229 | return true; | |
230 | } | |
231 | ||
/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned	exponent:BKEY_EXPONENT_BITS;
	unsigned	m:BKEY_MID_BITS;
	unsigned	mantissa:BKEY_MANTISSA_BITS;
} __packed;

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE		128
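
/*
 * A worked sizing example (a sketch, assuming 4KiB pages; not from the
 * original source): with page_order = 2 a node's keys occupy
 * PAGE_SIZE << 2 = 16KiB, i.e. 16384 / BSET_CACHELINE = 128 cachelines.
 * The auxiliary tree then needs 128 * sizeof(struct bkey_float) = 512
 * bytes and the prev table 128 bytes - see btree_keys_cachelines(),
 * bset_tree_bytes() and bset_prev_bytes() below.
 */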

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
	return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
	return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(uint8_t);
}

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
	struct bset_tree *t = b->set;

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_free);

int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
{
	struct bset_tree *t = b->set;

	BUG_ON(t->data);

	b->page_order = page_order;

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	return 0;
err:
	bch_btree_keys_free(b);
	return -ENOMEM;
}
EXPORT_SYMBOL(bch_btree_keys_alloc);

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks)
{
	unsigned i;

	b->ops = ops;
	b->expensive_debug_checks = expensive_debug_checks;
	b->nsets = 0;
	b->last_set_unwritten = 0;

	/* XXX: shouldn't be needed */
	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].size = 0;
	/*
	 * Second loop starts at 1 because b->set[0].data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);

/* Binary tree stuff for auxiliary search trees */

static unsigned inorder_next(unsigned j, unsigned size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}

/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for sizes up to around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;

	j ^= 1U << (b - 1);
	j <<= 1;
	j |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
	unsigned shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j |= roundup_pow_of_two(size) >> shift;

	return j;
}

static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}
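
/*
 * A worked example (added for illustration, not in the original source):
 * for size = 7 (heap indices 1..6, extra = (7 - 4) << 1 = 6), the heap
 * layout is
 *
 *         1
 *       /   \
 *      2     3
 *     / \   /
 *    4   5 6
 *
 * and an inorder walk visits 4, 2, 5, 1, 6, 3. __to_inorder() maps
 * j = 1..6 to 4, 2, 6, 1, 3, 5 respectively, and __inorder_to_tree()
 * is its exact inverse.
 */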

#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned size = 2;
	     size < 65536000;
	     size++) {
		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
			       done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
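
/*
 * For example (an illustrative sketch, not in the original source): with
 * BSET_CACHELINE = 128, cacheline 3 offset 2 names the bkey starting
 * 3 * 128 + 2 * 8 = 400 bytes into t->data - exactly what
 * cacheline_to_bkey() below computes.
 */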

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
				      unsigned offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
					 unsigned cacheline,
					 struct bkey *k)
{
	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}

static inline unsigned bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
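
/*
 * Illustration (added, not in the original source): shrd128(high, low, s)
 * returns the low 64 bits of the 128-bit value (high:low) shifted right
 * by s (0..63); the (high << 1) << (63 - s) form sidesteps the undefined
 * 64-bit shift when s == 0. bfloat_mantissa() uses it to pull
 * BKEY_MANTISSA_BITS bits out of the (high, low) pair starting at bit
 * f->exponent, e.g. exponent = 70 reads from bit 6 of k->high.
 */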

static void make_bfloat(struct bset_tree *t, unsigned j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->set + MAX_BSETS)
		t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(b->last_set_unwritten);
	b->last_set_unwritten = 1;

	bset_alloc_tree(b, t);

	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
		t->size = 1;
	}
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
	if (i != b->set->data) {
		b->set[++b->nsets].data = i;
		i->seq = b->set->data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic = magic;
	i->version = 0;
	i->keys = 0;

	bch_bset_build_unwritten_tree(b);
}
EXPORT_SYMBOL(bch_bset_init_next);

void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) < cacheline)
			prev = k, k = bkey_next(k);

		t->prev[j] = bkey_u64s(prev);
		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
	}

	while (bkey_next(k) != bset_bkey_last(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}
EXPORT_SYMBOL(bch_bset_build_written_tree);

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == bset_bkey_last(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);

static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/*
	 * k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	while (j < t->size &&
	       table_to_bkey(t, j) <= k)
		j++;

	/*
	 * Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
		}
	}

	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != bset_bkey_last(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
			t->size++;
		}
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
	if (!b->ops->key_merge)
		return false;

	/*
	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	 */
	if (!bch_bkey_equal_header(l, r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	return b->ops->key_merge(b, l, r);
}
EXPORT_SYMBOL(bch_bkey_try_merge);
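
/*
 * Example (an illustrative sketch, not in the original source): for
 * extents, a bkey ends at KEY_OFFSET() and starts at START_KEY() =
 * KEY_OFFSET() - KEY_SIZE(). So an extent covering sectors 0..4 and one
 * covering 4..8 satisfy !bkey_cmp(l, &START_KEY(r)) and, if the headers
 * and pointers also line up, key_merge() combines them into a single
 * extent covering 0..8.
 */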

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(!b->last_set_unwritten);
	BUG_ON(bset_byte_offset(b, t->data) +
	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
	       PAGE_SIZE << b->page_order);

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) bset_bkey_last(t->data) - (void *) where);

	t->data->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, t, where);
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
			      struct bkey *replace_key)
{
	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;

	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

	/*
	 * Start the iterator just before where k sorts, so insert_fixup()
	 * sees any existing keys that k overlaps:
	 */
	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
				? PRECEDING_KEY(&START_KEY(k))
				: PRECEDING_KEY(k));

	if (b->ops->insert_fixup(b, k, &iter, replace_key))
		return status;

	status = BTREE_INSERT_STATUS_INSERT;

	while (m != bset_bkey_last(i) &&
	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
		prev = m, m = bkey_next(m);

	/* prev is in the tree, if we merge we're done */
	status = BTREE_INSERT_STATUS_BACK_MERGE;
	if (prev &&
	    bch_bkey_try_merge(b, prev, k))
		goto merged;
#if 0
	status = BTREE_INSERT_STATUS_OVERWROTE;
	if (m != bset_bkey_last(i) &&
	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
		goto copy;
#endif
	status = BTREE_INSERT_STATUS_FRONT_MERGE;
	if (m != bset_bkey_last(i) &&
	    bch_bkey_try_merge(b, k, m))
		goto copy;

	bch_bset_insert(b, m, k);
copy:	bkey_copy(m, k);
merged:
	return status;
}
EXPORT_SYMBOL(bch_btree_insert_key);

/* Lookup */

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;

	do {
		unsigned p = n << 4;
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		/*
		 * n = (f->mantissa > bfloat_mantissa())
		 *	? j * 2
		 *	: j * 2 + 1;
		 *
		 * We need to subtract 1 from f->mantissa for the sign bit trick
		 * to work - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				? j * 2
				: j * 2 + 1;
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = bset_bkey_last(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First we search for a cacheline, then we do a linear search within
	 * that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a linear
	 *    search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search -
	 *    bset_search_write_set().
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = bset_bkey_last(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return bset_bkey_last(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(t, search);
	} else {
		BUG_ON(!b->nsets &&
		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

		i = bset_search_write_set(t, search);
	}

	if (btree_keys_expensive_checks(b)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != bset_bkey_last(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}
EXPORT_SYMBOL(__bch_bset_search);

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
				 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
					  struct btree_iter *iter,
					  struct bkey *search,
					  struct bset_tree *start)
{
	struct bkey *ret = NULL;
	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= bset_tree_last(b); start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->set);
}
EXPORT_SYMBOL(bch_btree_iter_init);

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
						 btree_iter_cmp_fn *cmp)
{
	struct btree_iter_set unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, unused, cmp);
		else
			heap_sift(iter, 0, cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	return __bch_btree_iter_next(iter, btree_iter_cmp);
}
EXPORT_SYMBOL(bch_btree_iter_next);

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

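#if 0
/*
 * Usage sketch (added for illustration; this is roughly what the
 * for_each_key() macro used by the debug code above expands to):
 */
void example_walk_node(struct btree_keys *b)
{
	struct btree_iter iter;
	struct bkey *k;

	bch_btree_iter_init(b, &iter, NULL);
	while ((k = bch_btree_iter_next(&iter)))
		/* keys arrive in sorted order, merged across all bsets */;
}
#endif
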
/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
	if (state->pool)
		mempool_destroy(state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
	spin_lock_init(&state->time.lock);

	state->page_order = page_order;
	state->crit_factor = int_sqrt(1 << page_order);

	state->pool = mempool_create_page_pool(1, page_order);
	if (!state->pool)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(bch_bset_sort_state_init);

static void btree_mergesort(struct btree_keys *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	int i;
	struct bkey *k, *last = NULL;
	BKEY_PADDED(k) tmp;
	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	/* Heapify the iterator, using our comparison function */
	for (i = iter->used / 2 - 1; i >= 0; --i)
		heap_sift(iter, i, b->ops->sort_cmp);

	while (!btree_iter_end(iter)) {
		if (b->ops->sort_fixup && fixup)
			k = b->ops->sort_fixup(iter, &tmp.k);
		else
			k = NULL;

		if (!k)
			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (!bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
	bool used_mempool = false;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
						     order);
	if (!out) {
		struct page *outp;

		BUG_ON(order > state->page_order);

		outp = mempool_alloc(state->pool, GFP_NOIO);
		out = page_address(outp);
		used_mempool = true;
		order = state->page_order;
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, false);
	b->nsets = start;

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		out->magic = b->set->data->magic;
		out->seq = b->set->data->seq;
		out->version = b->set->data->version;
		swap(out, b->set->data);
	} else {
		b->set[start].data->keys = out->keys;
		memcpy(b->set[start].data->start, out->start,
		       (void *) bset_bkey_last(out) - (void *) out->start);
	}

	if (used_mempool)
		mempool_free(virt_to_page(out), state->pool);
	else
		free_pages((unsigned long) out, order);

	bch_bset_build_written_tree(b);

	if (!start)
		bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;

		order = get_order(__set_bytes(b->set->data, keys));
	}

	__btree_sort(b, &iter, start, order, false, state);

	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
EXPORT_SYMBOL(bch_btree_sort_partial);

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state)
{
	__btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state)
{
	uint64_t start_time = local_clock();

	struct btree_iter iter;
	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->set->data, &iter, false, true);

	bch_time_stats_update(&state->time, start_time);

	new->set->size = 0; // XXX: why?
}

#define SORT_CRIT	(4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= state->crit_factor;

		if (b->set[i].data->keys < crit) {
			bch_btree_sort_partial(b, i, state);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b, state);
		return;
	}

out:
	bch_bset_build_written_tree(b);
}
EXPORT_SYMBOL(bch_btree_sort_lazy);

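/*
 * A worked example of the lazy heuristic (added for illustration, not in
 * the original source): SORT_CRIT is 4096 / 8 = 512 keys, and with
 * page_order = 4, crit_factor = int_sqrt(16) = 4. Working back from the
 * second-newest set: that set is merged down (together with everything
 * newer) once it holds fewer than 512 * 4 = 2048 keys, the set before it
 * below 8192 keys, and so on - older, bigger sets are resorted
 * exponentially less often.
 */
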
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}
}