/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/random.h>

/* Keylists */

void bch_keylist_copy(struct keylist *dest, struct keylist *src)
{
	*dest = *src;

	if (src->list == src->d) {
		size_t n = (uint64_t *) src->top - src->d;
		dest->top = (struct bkey *) &dest->d[n];
		dest->list = dest->d;
	}
}

int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
	unsigned oldsize = (uint64_t *) l->top - l->list;
	unsigned newsize = oldsize + 2 + nptrs;
	uint64_t *new;

	/* The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys
	 * created so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new = krealloc(l->list == l->d ? NULL : l->list,
		       sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new)
		return -ENOMEM;

	if (l->list == l->d)
		memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);

	l->list = new;
	l->top = (struct bkey *) (&l->list[oldsize]);

	return 0;
}

struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->bottom;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}

/* Pointer validation */

bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
{
	unsigned i;

	if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
		goto bad;

	if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (!KEY_SIZE(k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket < ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				goto bad;
		}

	return false;
bad:
	cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k));
	return true;
}

bool bch_ptr_bad(struct btree *b, const struct bkey *k)
{
	struct bucket *g;
	unsigned i, stale;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(b, k))
		return true;

	if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(b->c, k, i)) {
			g = PTR_BUCKET(b->c, k, i);
			stale = ptr_stale(b->c, k, i);

			btree_bug_on(stale > 96, b,
				     "key too stale: %i, need_gc %u",
				     stale, b->c->need_gc);

			btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
				     b, "stale dirty pointer");

			if (stale)
				return true;

#ifdef CONFIG_BCACHE_EDEBUG
			if (!mutex_trylock(&b->c->bucket_lock))
				continue;

			if (b->level) {
				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto bug;

			} else {
				if (g->prio == BTREE_PRIO)
					goto bug;

				if (KEY_DIRTY(k) &&
				    b->c->gc_mark_valid &&
				    GC_MARK(g) != GC_MARK_DIRTY)
					goto bug;
			}
			mutex_unlock(&b->c->bucket_lock);
#endif
		}

	return false;
#ifdef CONFIG_BCACHE_EDEBUG
bug:
	mutex_unlock(&b->c->bucket_lock);
	btree_bug(b, "inconsistent pointer %s: bucket %li pin %i "
		  "prio %i gen %i last_gc %i mark %llu gc_gen %i", pkey(k),
		  PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	return true;
#endif
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}
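
/*
 * Worked example: k = {offset 1536, size 512} covers sectors 1024..1536.
 * __bch_cut_front() with a cut point at offset 1280 leaves k as
 * {offset 1536, size 256} and advances each pointer by 256 sectors, while
 * __bch_cut_back() with the same cut point leaves k as
 * {offset 1280, size 256} with the pointers untouched.
 */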
225 | ||
226 | static uint64_t merge_chksums(struct bkey *l, struct bkey *r) | |
227 | { | |
228 | return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) & | |
229 | ~((uint64_t)1 << 63); | |
230 | } | |
231 | ||
232 | /* Tries to merge l and r: l should be lower than r | |
233 | * Returns true if we were able to merge. If we did merge, l will be the merged | |
234 | * key, r will be untouched. | |
235 | */ | |
236 | bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r) | |
237 | { | |
238 | unsigned i; | |
239 | ||
240 | if (key_merging_disabled(b->c)) | |
241 | return false; | |
242 | ||
243 | if (KEY_PTRS(l) != KEY_PTRS(r) || | |
244 | KEY_DIRTY(l) != KEY_DIRTY(r) || | |
245 | bkey_cmp(l, &START_KEY(r))) | |
246 | return false; | |
247 | ||
248 | for (i = 0; i < KEY_PTRS(l); i++) | |
249 | if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || | |
250 | PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) | |
251 | return false; | |
252 | ||
253 | /* Keys with no pointers aren't restricted to one bucket and could | |
254 | * overflow KEY_SIZE | |
255 | */ | |
256 | if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) { | |
257 | SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l)); | |
258 | SET_KEY_SIZE(l, USHRT_MAX); | |
259 | ||
260 | bch_cut_front(l, r); | |
261 | return false; | |
262 | } | |
263 | ||
264 | if (KEY_CSUM(l)) { | |
265 | if (KEY_CSUM(r)) | |
266 | l->ptr[KEY_PTRS(l)] = merge_chksums(l, r); | |
267 | else | |
268 | SET_KEY_CSUM(l, 0); | |
269 | } | |
270 | ||
271 | SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r)); | |
272 | SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r)); | |
273 | ||
274 | return true; | |
275 | } | |
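
/*
 * Worked example: l = {inode 1, offset 1536, size 512} (sectors
 * 1024..1536) and r = {inode 1, offset 2048, size 512} merge iff each of
 * r's pointers starts exactly KEY_SIZE(l) sectors after the matching
 * pointer of l, within the same bucket; the result is
 * l = {inode 1, offset 2048, size 1024}. When both keys carry checksums
 * (kept in the u64 slot just past the last pointer), the merged checksum
 * is their sum with the top bit cleared; if only one side has one,
 * KEY_CSUM is simply dropped.
 */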
276 | ||
277 | /* Binary tree stuff for auxiliary search trees */ | |
278 | ||
static unsigned inorder_next(unsigned j, unsigned size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}

/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j and size, for sizes up to somewhere around 6
 * million.
 *
 * The binary tree starts at array index 1, not 0.
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;

	j ^= 1U << (b - 1);
	j <<= 1;
	j |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}
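
/*
 * Worked example for size == 6: the tree holds nodes 1..5 and
 * extra == (6 - 4) << 1 == 4. An inorder walk visits nodes 4, 2, 5, 1, 3,
 * and __to_inorder() maps those tree indices to inorder positions 1..5 -
 * e.g. j == 3 becomes 6 before the final fixup, then
 * 6 - ((6 - 4) >> 1) == 5.
 */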
338 | ||
339 | static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra) | |
340 | { | |
341 | unsigned shift; | |
342 | ||
343 | if (j > extra) | |
344 | j += j - extra; | |
345 | ||
346 | shift = ffs(j); | |
347 | ||
348 | j >>= shift; | |
349 | j |= roundup_pow_of_two(size) >> shift; | |
350 | ||
351 | return j; | |
352 | } | |
353 | ||
354 | static unsigned inorder_to_tree(unsigned j, struct bset_tree *t) | |
355 | { | |
356 | return __inorder_to_tree(j, t->size, t->extra); | |
357 | } | |
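
/*
 * __inorder_to_tree() is the exact inverse of __to_inorder(); the
 * (disabled) inorder_test() below exercises both directions, plus
 * inorder_next()/inorder_prev(), for every j at each size it tries.
 */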
358 | ||
359 | #if 0 | |
360 | void inorder_test(void) | |
361 | { | |
362 | unsigned long done = 0; | |
363 | ktime_t start = ktime_get(); | |
364 | ||
365 | for (unsigned size = 2; | |
366 | size < 65536000; | |
367 | size++) { | |
368 | unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1; | |
369 | unsigned i = 1, j = rounddown_pow_of_two(size - 1); | |
370 | ||
371 | if (!(size % 4096)) | |
372 | printk(KERN_NOTICE "loop %u, %llu per us\n", size, | |
373 | done / ktime_us_delta(ktime_get(), start)); | |
374 | ||
375 | while (1) { | |
376 | if (__inorder_to_tree(i, size, extra) != j) | |
377 | panic("size %10u j %10u i %10u", size, j, i); | |
378 | ||
379 | if (__to_inorder(j, size, extra) != i) | |
380 | panic("size %10u j %10u i %10u", size, j, i); | |
381 | ||
382 | if (j == rounddown_pow_of_two(size) - 1) | |
383 | break; | |
384 | ||
385 | BUG_ON(inorder_prev(inorder_next(j, size), size) != j); | |
386 | ||
387 | j = inorder_next(j, size); | |
388 | i++; | |
389 | } | |
390 | ||
391 | done += size - 1; | |
392 | } | |
393 | } | |
394 | #endif | |
395 | ||
396 | /* | |
397 | * Cacheline/offset <-> bkey pointer arithmatic: | |
398 | * | |
399 | * t->tree is a binary search tree in an array; each node corresponds to a key | |
400 | * in one cacheline in t->set (BSET_CACHELINE bytes). | |
401 | * | |
402 | * This means we don't have to store the full index of the key that a node in | |
403 | * the binary tree points to; to_inorder() gives us the cacheline, and then | |
404 | * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes. | |
405 | * | |
406 | * cacheline_to_bkey() and friends abstract out all the pointer arithmatic to | |
407 | * make this work. | |
408 | * | |
409 | * To construct the bfloat for an arbitrary key we need to know what the key | |
410 | * immediately preceding it is: we have to check if the two keys differ in the | |
411 | * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size | |
412 | * of the previous key so we can walk backwards to it from t->tree[j]'s key. | |
413 | */ | |
414 | ||
415 | static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline, | |
416 | unsigned offset) | |
417 | { | |
418 | return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8; | |
419 | } | |
420 | ||
421 | static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k) | |
422 | { | |
423 | return ((void *) k - (void *) t->data) / BSET_CACHELINE; | |
424 | } | |
425 | ||
426 | static unsigned bkey_to_cacheline_offset(struct bkey *k) | |
427 | { | |
428 | return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t); | |
429 | } | |
430 | ||
431 | static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j) | |
432 | { | |
433 | return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m); | |
434 | } | |
435 | ||
436 | static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j) | |
437 | { | |
438 | return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]); | |
439 | } | |
440 | ||
441 | /* | |
442 | * For the write set - the one we're currently inserting keys into - we don't | |
443 | * maintain a full search tree, we just keep a simple lookup table in t->prev. | |
444 | */ | |
445 | static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline) | |
446 | { | |
447 | return cacheline_to_bkey(t, cacheline, t->prev[cacheline]); | |
448 | } | |
449 | ||
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
#ifdef CONFIG_X86_64
	asm("shrd %[shift],%[high],%[low]"
	    : [low] "+Rm" (low)
	    : [high] "R" (high),
	      [shift] "ci" (shift)
	    : "cc");
#else
	low >>= shift;
	low |= (high << 1) << (63U - shift);
#endif
	return low;
}

static inline unsigned bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? node(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}

static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	if (t != b->sets) {
		unsigned j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->sets + MAX_BSETS)
		t++->size = 0;
}

static void bset_build_unwritten_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;

	bset_alloc_tree(b, t);

	if (t->tree != b->sets->tree + bset_tree_space(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
		t->size = 1;
	}
}

static void bset_build_written_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;
	struct bkey *k = t->data->start;
	unsigned j, cacheline = 1;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
			bkey_to_cacheline(t, end(t->data)),
			b->sets->tree + bset_tree_space(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) != cacheline)
			k = bkey_next(k);

		t->prev[j] = bkey_u64s(k);
		k = bkey_next(k);
		cacheline++;
		t->tree[j].m = bkey_to_cacheline_offset(k);
	}

	while (bkey_next(k) != end(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}

void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		if (k < end(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == end(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}

void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
{
	struct bset_tree *t = &b->sets[b->nsets];
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/* k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	if (j < t->size &&
	    table_to_bkey(t, j) <= k)
		j++;

	/* Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(k);
		}
	}

	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != end(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] = bkey_to_cacheline_offset(k);
			t->size++;
		}
}

void bch_bset_init_next(struct btree *b)
{
	struct bset *i = write_block(b);

	if (i != b->sets[0].data) {
		b->sets[++b->nsets].data = i;
		i->seq = b->sets[0].data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic = bset_magic(b->c);
	i->version = 0;
	i->keys = 0;

	bset_build_unwritten_tree(b);
}

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct btree *b,
						     struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;

	BUG_ON(!b->nsets &&
	       t->size < bkey_to_cacheline(t, end(t->data)));

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct btree *b,
						struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;

	do {
		unsigned p = n << 4;
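		/*
		 * ((int) (p - t->size)) >> 31 is all ones when p < t->size
		 * and 0 otherwise, so the &= below clamps an out-of-range
		 * prefetch index to 0 without a branch.
		 */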
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		/*
		 * n = (f->mantissa > bfloat_mantissa())
		 *	? j * 2
		 *	: j * 2 + 1;
		 *
		 * We need to subtract 1 from f->mantissa for the sign bit
		 * trick to work - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				? j * 2
				: j * 2 + 1;
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = end(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}
776 | ||
777 | struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t, | |
778 | const struct bkey *search) | |
779 | { | |
780 | struct bset_search_iter i; | |
781 | ||
782 | /* | |
783 | * First, we search for a cacheline, then lastly we do a linear search | |
784 | * within that cacheline. | |
785 | * | |
786 | * To search for the cacheline, there's three different possibilities: | |
787 | * * The set is too small to have a search tree, so we just do a linear | |
788 | * search over the whole set. | |
789 | * * The set is the one we're currently inserting into; keeping a full | |
790 | * auxiliary search tree up to date would be too expensive, so we | |
791 | * use a much simpler lookup table to do a binary search - | |
792 | * bset_search_write_set(). | |
793 | * * Or we use the auxiliary search tree we constructed earlier - | |
794 | * bset_search_tree() | |
795 | */ | |
796 | ||
797 | if (unlikely(!t->size)) { | |
798 | i.l = t->data->start; | |
799 | i.r = end(t->data); | |
800 | } else if (bset_written(b, t)) { | |
801 | /* | |
802 | * Each node in the auxiliary search tree covers a certain range | |
803 | * of bits, and keys above and below the set it covers might | |
804 | * differ outside those bits - so we have to special case the | |
805 | * start and end - handle that here: | |
806 | */ | |
807 | ||
808 | if (unlikely(bkey_cmp(search, &t->end) >= 0)) | |
809 | return end(t->data); | |
810 | ||
811 | if (unlikely(bkey_cmp(search, t->data->start) < 0)) | |
812 | return t->data->start; | |
813 | ||
814 | i = bset_search_tree(b, t, search); | |
815 | } else | |
816 | i = bset_search_write_set(b, t, search); | |
817 | ||
818 | #ifdef CONFIG_BCACHE_EDEBUG | |
819 | BUG_ON(bset_written(b, t) && | |
820 | i.l != t->data->start && | |
821 | bkey_cmp(tree_to_prev_bkey(t, | |
822 | inorder_to_tree(bkey_to_cacheline(t, i.l), t)), | |
823 | search) > 0); | |
824 | ||
825 | BUG_ON(i.r != end(t->data) && | |
826 | bkey_cmp(i.r, search) <= 0); | |
827 | #endif | |
828 | ||
829 | while (likely(i.l != i.r) && | |
830 | bkey_cmp(i.l, search) <= 0) | |
831 | i.l = bkey_next(i.l); | |
832 | ||
833 | return i.l; | |
834 | } | |
835 | ||
/* Btree iterator */

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
				   struct bkey *search, struct bset_tree *start)
{
	struct bkey *ret = NULL;
	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

	for (; start <= &b->sets[b->nsets]; start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, end(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	struct btree_iter_set unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			__WARN();
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, unused, btree_iter_cmp);
		else
			heap_sift(iter, 0, btree_iter_cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
{
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, search);
	return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
}

/* Mergesort */

static void btree_sort_fixup(struct btree_iter *iter)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;
		struct bkey *k;

		if (iter->used > 2 &&
		    btree_iter_cmp(i[0], i[1]))
			i++;

		for (k = i->k;
		     k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
		     k = bkey_next(k))
			if (top->k > i->k)
				__bch_cut_front(top->k, k);
			else if (KEY_SIZE(k))
				bch_cut_back(&START_KEY(k), top->k);

		if (top->k < i->k || k == i->k)
			break;

		heap_sift(iter, i - top, btree_iter_cmp);
	}
}

static void btree_mergesort(struct btree *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	struct bkey *k, *last = NULL;
	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	while (!btree_iter_end(iter)) {
		if (fixup && !b->level)
			btree_sort_fixup(iter);

		k = bch_btree_iter_next(iter);
		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (b->level ||
			   !bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys", out->keys);
	bch_check_key_order(b, out);
}

static void __btree_sort(struct btree *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup)
{
	uint64_t start_time;
	bool remove_stale = !b->written;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
						     order);
	if (!out) {
		mutex_lock(&b->c->sort_lock);
		out = b->c->sort;
		order = ilog2(bucket_pages(b->c));
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, remove_stale);
	b->nsets = start;

	if (!fixup && !start && b->written)
		bch_btree_verify(b, out);

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */

		out->magic = bset_magic(b->c);
		out->seq = b->sets[0].data->seq;
		out->version = b->sets[0].data->version;
		swap(out, b->sets[0].data);

		if (b->c->sort == b->sets[0].data)
			b->c->sort = out;
	} else {
		b->sets[start].data->keys = out->keys;
		memcpy(b->sets[start].data->start, out->start,
		       (void *) end(out) - (void *) out->start);
	}

	if (out == b->c->sort)
		mutex_unlock(&b->c->sort_lock);
	else
		free_pages((unsigned long) out, order);

	if (b->written)
		bset_build_written_tree(b);

	if (!start) {
		spin_lock(&b->c->sort_time_lock);
		time_stats_update(&b->c->sort_time, start_time);
		spin_unlock(&b->c->sort_time_lock);
	}
}

void bch_btree_sort_partial(struct btree *b, unsigned start)
{
	size_t oldsize = 0, order = b->page_order, keys = 0;
	struct btree_iter iter;
	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);

	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
	       (b->sets[b->nsets].size || b->nsets));

	if (b->written)
		oldsize = bch_count_data(b);

	if (start) {
		unsigned i;

		for (i = start; i <= b->nsets; i++)
			keys += b->sets[i].data->keys;

		order = roundup_pow_of_two(__set_bytes(b->sets->data,
						       keys)) / PAGE_SIZE;
		if (order)
			order = ilog2(order);
	}

	__btree_sort(b, &iter, start, order, false);

	EBUG_ON(b->written && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
{
	BUG_ON(!b->written);
	__btree_sort(b, iter, 0, b->page_order, true);
}

void bch_btree_sort_into(struct btree *b, struct btree *new)
{
	uint64_t start_time = local_clock();

	struct btree_iter iter;
	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->sets->data, &iter, false, true);

	spin_lock(&b->c->sort_time_lock);
	time_stats_update(&b->c->sort_time, start_time);
	spin_unlock(&b->c->sort_time_lock);

	bkey_copy_key(&new->key, &b->key);
	new->sets->size = 0;
}

void bch_btree_sort_lazy(struct btree *b)
{
	if (b->nsets) {
		unsigned i, j, keys = 0, total;

		for (i = 0; i <= b->nsets; i++)
			keys += b->sets[i].data->keys;

		total = keys;

		for (j = 0; j < b->nsets; j++) {
			if (keys * 2 < total ||
			    keys < 1000) {
				bch_btree_sort_partial(b, j);
				return;
			}

			keys -= b->sets[j].data->keys;
		}

		/* Must sort if b->nsets == 3 or we'll overflow */
		if (b->nsets >= (MAX_BSETS - 1) - b->level) {
			bch_btree_sort(b);
			return;
		}
	}

	bset_build_written_tree(b);
}

/* Sysfs stuff */

struct bset_stats {
	size_t nodes;
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	size_t floats, failed;
};

static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
				struct bset_stats *stats)
{
	struct bkey *k;
	unsigned i;

	stats->nodes++;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->sets[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}

	if (b->level) {
		struct btree_iter iter;

		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
			int ret = btree(bset_stats, k, b, op, stats);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct btree_op op;
	struct bset_stats t;
	int ret;

	bch_btree_op_init_stack(&op);
	memset(&t, 0, sizeof(struct bset_stats));

	ret = btree_root(bset_stats, c, &op, &t);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			t.nodes,
			t.sets_written, t.sets_unwritten,
			t.bytes_written, t.bytes_unwritten,
			t.floats, t.failed);
}