/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"

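/*
 * Advance iterator set @i to its next key; if the set is exhausted,
 * replace it with the last set in the heap and shrink the heap.
 */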
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}

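/*
 * Heap comparison for merge-sorting btree node keys: returns true if l
 * sorts after r; ties are broken by address so that duplicate keys are
 * processed newest to oldest (see bch_extent_sort_cmp() below).
 */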
static bool bch_key_sort_cmp(struct btree_iter_set l,
			     struct btree_iter_set r)
{
	int64_t c = bkey_cmp(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}

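/*
 * Returns true if any pointer in @k references a bucket outside the
 * valid range of its device, or if the extent spills past the end of a
 * bucket.
 */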
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket < ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}

/* Common among btree and extent ptrs */

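/*
 * Returns a human-readable description of what, if anything, is wrong
 * with @k, or the empty string for a healthy key.
 */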
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size)
				return "bad, length too big";
			if (bucket < ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}

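/*
 * Format @k into @buf as "inode:start len size -> [dev:offset gen N, ...]",
 * appending dirty and checksum information when present.
 */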
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}

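/*
 * Debug helper: print @k along with the bucket number, priority and
 * status of each of its pointers.
 */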
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	printk(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);
		printk(" bucket %zu", n);

		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
			printk(" prio %i",
			       PTR_BUCKET(b->c, k, j)->prio);
	}

	printk(" %s\n", bch_ptr_status(b->c, k));
}

/* Btree ptrs */

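/*
 * Cheap validity check for a btree node pointer: it must have pointers,
 * a nonzero size, must not be dirty, and its pointers must be in range.
 */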
bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	return __bch_btree_ptr_invalid(b->c, k);
}

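/*
 * Expensive debug check: with the bucket lock held, verify that every
 * bucket a btree pointer references is clean, has BTREE_PRIO, and is
 * marked as metadata by garbage collection.
 */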
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

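/*
 * Returns true if @k should be skipped when reading a btree node: the
 * null key, keys with no pointers, invalid keys, and keys with any
 * unavailable or stale pointer are all bad.
 */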
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}

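/*
 * Btree node pointers never overlap, so there is nothing to fix up when
 * inserting one. Inserting a zero-offset key additionally bumps
 * prio_blocked on the current write, which holds back bucket reuse
 * until this node's write completes (released by
 * bch_btree_complete_write()).
 */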
static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}

238 | const struct btree_keys_ops bch_btree_keys_ops = { |
239 | .sort_cmp = bch_key_sort_cmp, | |
829a60b9 | 240 | .insert_fixup = bch_btree_ptr_insert_fixup, |
65d45231 KO |
241 | .key_invalid = bch_btree_ptr_invalid, |
242 | .key_bad = bch_btree_ptr_bad, | |
dc9d98d6 KO |
243 | .key_to_text = bch_extent_to_text, |
244 | .key_dump = bch_bkey_dump, | |
65d45231 KO |
245 | }; |
246 | ||
/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool bch_extent_sort_cmp(struct btree_iter_set l,
				struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}

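/*
 * Resolve overlaps between the two smallest extents in the sort heap
 * before the top one is emitted: newer extents take precedence, so older
 * ones are trimmed around them. When an older extent sticks out on both
 * sides of a newer one it must be split; the bottom half is returned via
 * @tmp so the caller can emit it immediately.
 */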
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		if (iter->used > 2 &&
		    bch_extent_sort_cmp(i[0], i[1]))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			heap_sift(iter, i - top, bch_extent_sort_cmp);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, bch_extent_sort_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, bch_extent_sort_cmp);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}

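/*
 * If @k is dirty, subtract @sectors from the dirty sector count of the
 * backing device it belongs to (used when @k is being trimmed or
 * overwritten).
 */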
static void bch_subtract_dirty(struct bkey *k,
			       struct cache_set *c,
			       uint64_t offset,
			       int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}

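/*
 * Fix up existing extents that overlap the range @insert covers: walk
 * them in order, trimming or splitting as needed, and keep the dirty
 * sector accounting in sync. For example, inserting a key for sectors
 * 25..75 over an existing key covering 0..100 splits the old key into
 * 0..25 and 75..100.
 *
 * If @replace_key is set this is a cache-replace operation: @insert may
 * only overwrite data that still matches @replace_key, and is shrunk -
 * or dropped entirely, by returning true - if only part of it was found.
 */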
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);
		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}

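/*
 * Cheap validity check for an extent: it must have a nonzero size, must
 * not extend below offset 0, and its pointers must be in range.
 */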
bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}

static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	return __bch_extent_invalid(b->c, k);
}

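/*
 * Expensive debug check: with the bucket lock held, verify that the
 * bucket behind pointer @ptr is not a btree bucket and carries a GC
 * mark consistent with the key's dirty bit.
 */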
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}

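/*
 * Returns true if @k should be skipped when reading: extents with no or
 * unavailable pointers, invalid extents, and extents whose pointers have
 * gone stale are all bad. Dirty extents are normally trusted without the
 * stale check, since a dirty pointer can never legitimately go stale.
 */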
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	struct bucket *g;
	unsigned i, stale;

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
		return false;

	for (i = 0; i < KEY_PTRS(k); i++) {
		g = PTR_BUCKET(b->c, k, i);
		stale = ptr_stale(b->c, k, i);

		btree_bug_on(stale > 96, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
			     b, "stale dirty pointer");

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}

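/*
 * Checksums live in the u64 slot after the last pointer; when two
 * extents are merged their checksums are summed, masking off the top
 * bit.
 */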
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}

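/*
 * Try to merge r into l: possible only when each of l's pointers is
 * immediately followed on disk, within the same bucket, by the
 * corresponding pointer of r. Returns true if the keys were merged; if
 * the combined size would overflow KEY_SIZE, l is instead grown to the
 * maximum size, r is cut down to the remainder, and false is returned.
 */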
static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}

const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
	.is_extents	= true,
};