/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree.h"
#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
/*
 * Removing an entry from a btree
 * ==============================
 *
 * A very important constraint for our btree is that no node, except the
 * root, may have fewer than a certain number of entries.
 * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
 *
 * Ensuring this is complicated by the way we want to only ever hold the
 * locks on 2 nodes concurrently, and only change nodes in a top to bottom
 * fashion.
 *
 * Each node may have a left or right sibling.  When descending the spine,
 * if a node contains only MIN_ENTRIES then we try to increase this to at
 * least MIN_ENTRIES + 1.  We do this in the following ways:
 *
 * [A] No siblings => this can only happen if the node is the root, in which
 *     case we copy the child's contents over the root.
 *
 * [B] No left sibling
 *     ==> rebalance(node, right sibling)
 *
 * [C] No right sibling
 *     ==> rebalance(left sibling, node)
 *
 * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 *     ==> delete node, adding its contents to left and right
 *
 * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 *     ==> rebalance(left, node, right)
 *
 * After these operations it's possible that our original node no longer
 * contains the desired subtree.  For this reason the rebalancing is
 * performed on the children of the current node.  This also avoids
 * having a special case for the root.
 *
 * Once this rebalancing has occurred we can then step into the child node
 * for internal nodes, or delete the entry for leaf nodes.
 */
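/*
 * A worked example with purely illustrative numbers: if max_entries is
 * 126, merge_threshold() below gives 126 / 3 = 42, and the three-node
 * cutoff used by __rebalance3() is 4 * 42 + 1 = 169.  So with
 * nr(left) = 50, nr(node) = 42, nr(right) = 60 the total is 152 < 169
 * and the node is deleted, its entries absorbed by the siblings (case
 * [D] above); a total of 169 or more would instead spread the entries
 * evenly across all three nodes (case [E]).
 */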
/*
 * Some little utilities for moving node data around.
 */
static void node_shift(struct btree_node *n, int shift)
{
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	if (shift < 0) {
		shift = -shift;
		BUG_ON(shift > nr_entries);
		BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift));
		memmove(key_ptr(n, 0),
			key_ptr(n, shift),
			(nr_entries - shift) * sizeof(__le64));
		memmove(value_ptr(n, 0),
			value_ptr(n, shift),
			(nr_entries - shift) * value_size);
	} else {
		BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
		memmove(key_ptr(n, shift),
			key_ptr(n, 0),
			nr_entries * sizeof(__le64));
		memmove(value_ptr(n, shift),
			value_ptr(n, 0),
			nr_entries * value_size);
	}
}
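/*
 * Example of node_shift() semantics, with a hypothetical 4-entry node
 * holding keys {1, 2, 3, 4}: node_shift(n, 2) moves every entry up,
 * leaving a 2-entry hole at the front: {_, _, 1, 2, 3, 4}; whereas
 * node_shift(n, -2) discards the first two entries: {3, 4}.  Note the
 * caller is responsible for adjusting header.nr_entries afterwards.
 */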
static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t value_size = le32_to_cpu(left->header.value_size);
	BUG_ON(value_size != le32_to_cpu(right->header.value_size));

	if (shift < 0) {
		shift = -shift;
		BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
		memcpy(key_ptr(left, nr_left),
		       key_ptr(right, 0),
		       shift * sizeof(__le64));
		memcpy(value_ptr(left, nr_left),
		       value_ptr(right, 0),
		       shift * value_size);
	} else {
		BUG_ON(shift > le32_to_cpu(right->header.max_entries));
		memcpy(key_ptr(right, 0),
		       key_ptr(left, nr_left - shift),
		       shift * sizeof(__le64));
		memcpy(value_ptr(right, 0),
		       value_ptr(left, nr_left - shift),
		       shift * value_size);
	}
}
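/*
 * Example of node_copy() direction, again hypothetical: with shift = -2,
 * node_copy(left, right, -2) appends the first 2 entries of right onto
 * the end of left (used when merging right into left); with shift = 2 it
 * copies the last 2 entries of left into the front of right, which the
 * caller must first have opened up with node_shift().  Entry counts are
 * again left to the caller.
 */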
/*
 * Delete a specific entry from a leaf node.
 */
static void delete_at(struct btree_node *n, unsigned index)
{
	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
	unsigned nr_to_copy = nr_entries - (index + 1);
	uint32_t value_size = le32_to_cpu(n->header.value_size);
	BUG_ON(index >= nr_entries);

	if (nr_to_copy) {
		memmove(key_ptr(n, index),
			key_ptr(n, index + 1),
			nr_to_copy * sizeof(__le64));

		memmove(value_ptr(n, index),
			value_ptr(n, index + 1),
			nr_to_copy * value_size);
	}

	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
static unsigned merge_threshold(struct btree_node *n)
{
	return le32_to_cpu(n->header.max_entries) / 3;
}
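/*
 * e.g. with the illustrative max_entries = 126 used above, the merge
 * threshold is 42: __rebalance2() below only merges two siblings when
 * they hold fewer than 2 * 42 + 1 = 85 entries between them.
 */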
struct child {
	unsigned index;
	struct dm_block *block;
	struct btree_node *n;
};
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
		      struct btree_node *parent,
		      unsigned index, struct child *result)
{
	int r, inc;
	dm_block_t root;

	result->index = index;
	root = value64(parent, index);

	r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
			       &result->block, &inc);
	if (r)
		return r;

	result->n = dm_block_data(result->block);

	if (inc)
		inc_children(info->tm, result->n, vt);

	*((__le64 *) value_ptr(parent, index)) =
		cpu_to_le64(dm_block_location(result->block));

	return 0;
}
static void exit_child(struct dm_btree_info *info, struct child *c)
{
	dm_tm_unlock(info->tm, c->block);
}
static void shift(struct btree_node *left, struct btree_node *right, int count)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);

	BUG_ON(max_entries != r_max_entries);
	BUG_ON(nr_left - count > max_entries);
	BUG_ON(nr_right + count > max_entries);

	if (!count)
		return;

	if (count > 0) {
		node_shift(right, count);
		node_copy(left, right, count);
	} else {
		node_copy(left, right, count);
		node_shift(right, count);
	}

	left->header.nr_entries = cpu_to_le32(nr_left - count);
	right->header.nr_entries = cpu_to_le32(nr_right + count);
}
static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
			 struct child *l, struct child *r)
{
	struct btree_node *left = l->n;
	struct btree_node *right = r->n;
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	unsigned threshold = 2 * merge_threshold(left) + 1;

	if (nr_left + nr_right < threshold) {
		/*
		 * Merge
		 */
		node_copy(left, right, -nr_right);
		left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
		delete_at(parent, r->index);

		/*
		 * We need to decrement the right block, but not its
		 * children, since they're still referenced by left.
		 */
		dm_tm_dec(info->tm, dm_block_location(r->block));
	} else {
		/*
		 * Rebalance.
		 */
		unsigned target_left = (nr_left + nr_right) / 2;
		shift(left, right, nr_left - target_left);
		*key_ptr(parent, r->index) = right->keys[0];
	}
}
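/*
 * Worked example (illustrative numbers, max_entries = 126, so the
 * threshold is 85): nr_left = 30 and nr_right = 40 sum to 70 < 85, so
 * right is merged into left and deleted.  With nr_right = 60 the sum is
 * 90 >= 85, so we rebalance instead: target_left = 45, and
 * shift(left, right, -15) moves 15 entries from right to left, leaving
 * 45 entries in each node.
 */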
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, unsigned left_index)
{
	int r;
	struct btree_node *parent;
	struct child left, right;

	parent = dm_block_data(shadow_current(s));

	r = init_child(info, vt, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, vt, parent, left_index + 1, &right);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	__rebalance2(info, parent, &left, &right);

	exit_child(info, &left);
	exit_child(info, &right);

	return 0;
}
/*
 * We dump as many entries from center as possible into left, then the rest
 * in right, then rebalance2.  This wastes some cpu, but I want something
 * simple for now.
 */
static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
			       struct child *l, struct child *c, struct child *r,
			       struct btree_node *left, struct btree_node *center, struct btree_node *right,
			       uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned shift = min(max_entries - nr_left, nr_center);

	BUG_ON(nr_left + shift > max_entries);
	node_copy(left, center, -shift);
	left->header.nr_entries = cpu_to_le32(nr_left + shift);

	if (shift != nr_center) {
		shift = nr_center - shift;
		BUG_ON((nr_right + shift) > max_entries);
		node_shift(right, shift);
		node_copy(center, right, shift);
		right->header.nr_entries = cpu_to_le32(nr_right + shift);
	}
	*key_ptr(parent, r->index) = right->keys[0];

	delete_at(parent, c->index);
	r->index--;

	dm_tm_dec(info->tm, dm_block_location(c->block));
	__rebalance2(info, parent, l, r);
}
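/*
 * Worked example (illustrative numbers): max_entries = 126,
 * nr_left = 100, nr_center = 40, nr_right = 20.  shift starts as
 * min(126 - 100, 40) = 26, so 26 of center's entries are appended to
 * left (now full at 126); the remaining 14 are pushed onto the front of
 * right (now 34).  The emptied center node is then deleted from the
 * parent and left/right are rebalanced with __rebalance2().
 */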
/*
 * Redistributes entries among 3 sibling nodes.
 */
static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
			  struct child *l, struct child *c, struct child *r,
			  struct btree_node *left, struct btree_node *center, struct btree_node *right,
			  uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
	int s;
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned target = (nr_left + nr_center + nr_right) / 3;
	BUG_ON(target > max_entries);

	if (nr_left < nr_right) {
		s = nr_left - target;

		if (s < 0 && nr_center < -s) {
			/* not enough in central node */
			shift(left, center, -nr_center);
			s += nr_center;
			shift(left, right, s);
			nr_right += s;
		} else
			shift(left, center, s);

		shift(center, right, target - nr_right);
	} else {
		s = target - nr_right;
		if (s > 0 && nr_center < s) {
			/* not enough in central node */
			shift(center, right, nr_center);
			s -= nr_center;
			shift(left, right, s);
			nr_left -= s;
		} else
			shift(center, right, s);

		shift(left, center, nr_left - target);
	}

	*key_ptr(parent, c->index) = center->keys[0];
	*key_ptr(parent, r->index) = right->keys[0];
}
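/*
 * Worked example (illustrative numbers): nr_left = 30, nr_center = 60,
 * nr_right = 90, so target = 180 / 3 = 60.  nr_left < nr_right, and
 * s = 30 - 60 = -30 with nr_center >= 30, so shift(left, center, -30)
 * tops left up from center (left = 60, center = 30), and then
 * shift(center, right, 60 - 90) pulls 30 entries from right into
 * center, leaving all three nodes holding 60 entries.
 */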
static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
			 struct child *l, struct child *c, struct child *r)
{
	struct btree_node *left = l->n;
	struct btree_node *center = c->n;
	struct btree_node *right = r->n;

	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);

	unsigned threshold = merge_threshold(left) * 4 + 1;

	BUG_ON(left->header.max_entries != center->header.max_entries);
	BUG_ON(center->header.max_entries != right->header.max_entries);

	if ((nr_left + nr_center + nr_right) < threshold)
		delete_center_node(info, parent, l, c, r, left, center, right,
				   nr_left, nr_center, nr_right);
	else
		redistribute3(info, parent, l, c, r, left, center, right,
			      nr_left, nr_center, nr_right);
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, unsigned left_index)
{
	int r;
	struct btree_node *parent = dm_block_data(shadow_current(s));
	struct child left, center, right;

	/*
	 * FIXME: fill out an array?
	 */
	r = init_child(info, vt, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, vt, parent, left_index + 1, &center);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	r = init_child(info, vt, parent, left_index + 2, &right);
	if (r) {
		exit_child(info, &left);
		exit_child(info, &center);
		return r;
	}

	__rebalance3(info, parent, &left, &center, &right);

	exit_child(info, &left);
	exit_child(info, &center);
	exit_child(info, &right);

	return 0;
}
static int rebalance_children(struct shadow_spine *s,
			      struct dm_btree_info *info,
			      struct dm_btree_value_type *vt, uint64_t key)
{
	int i, r, has_left_sibling, has_right_sibling;
	struct btree_node *n;

	n = dm_block_data(shadow_current(s));

	if (le32_to_cpu(n->header.nr_entries) == 1) {
		struct dm_block *child;
		dm_block_t b = value64(n, 0);

		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		if (r)
			return r;

		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));

		/*
		 * Decrement the child block before unlocking it; taking
		 * its location after the unlock would be a use after free.
		 */
		dm_tm_dec(info->tm, dm_block_location(child));
		dm_tm_unlock(info->tm, child);
		return 0;
	}

	i = lower_bound(n, key);
	if (i < 0)
		return -ENODATA;

	has_left_sibling = i > 0;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

	if (!has_left_sibling)
		r = rebalance2(s, info, vt, i);
	else if (!has_right_sibling)
		r = rebalance2(s, info, vt, i - 1);
	else
		r = rebalance3(s, info, vt, i - 1);

	return r;
}
static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
	int i = lower_bound(n, key);

	if ((i < 0) ||
	    (i >= le32_to_cpu(n->header.nr_entries)) ||
	    (le64_to_cpu(n->keys[i]) != key))
		return -ENODATA;

	*index = i;

	return 0;
}
/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, dm_block_t root,
		      uint64_t key, unsigned *index)
{
	int i = *index, r;
	struct btree_node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		r = rebalance_children(s, info, vt, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA.
		 */
		root = value64(n, i);
	}

	return r;
}
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, dm_block_t *new_root)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_vt;

	init_le64_type(info->tm, &le64_vt);
	init_shadow_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = remove_raw(&spine, info,
			       (level == last_level ?
				&info->value_type : &le64_vt),
			       root, keys[level], (unsigned *) &index);
		if (r < 0)
			break;

		n = dm_block_data(shadow_current(&spine));
		if (level != last_level) {
			root = value64(n, index);
			continue;
		}

		BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));

		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));

		delete_at(n, index);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);
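/*
 * Usage sketch (a hypothetical caller, assuming 'info' describes a
 * btree with a single key level):
 *
 *	uint64_t keys[1] = { key_to_remove };
 *	dm_block_t new_root;
 *	int r;
 *
 *	r = dm_btree_remove(info, old_root, keys, &new_root);
 *
 * On success the tree rooted at new_root no longer contains the key and
 * old_root must not be reused; -ENODATA means the key wasn't present.
 */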
/*----------------------------------------------------------------*/
static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
			  struct dm_btree_value_type *vt, dm_block_t root,
			  uint64_t key, int *index)
{
	int i = *index, r;
	struct btree_node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
			*index = lower_bound(n, key);
			return 0;
		}

		r = rebalance_children(s, info, vt, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
			*index = lower_bound(n, key);
			return 0;
		}

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA.
		 */
		root = value64(n, i);
	}

	return r;
}
static int remove_one(struct dm_btree_info *info, dm_block_t root,
		      uint64_t *keys, uint64_t end_key,
		      dm_block_t *new_root, unsigned *nr_removed)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_vt;
	uint64_t k;

	init_le64_type(info->tm, &le64_vt);
	init_shadow_spine(&spine, info);
	for (level = 0; level < last_level; level++) {
		r = remove_raw(&spine, info, &le64_vt,
			       root, keys[level], (unsigned *) &index);
		if (r < 0)
			goto out;

		n = dm_block_data(shadow_current(&spine));
		root = value64(n, index);
	}

	r = remove_nearest(&spine, info, &info->value_type,
			   root, keys[last_level], &index);
	if (r < 0)
		goto out;

	n = dm_block_data(shadow_current(&spine));

	if (index < 0)
		index = 0;

	if (index >= le32_to_cpu(n->header.nr_entries)) {
		r = -ENODATA;
		goto out;
	}

	k = le64_to_cpu(n->keys[index]);
	if (k >= keys[last_level] && k < end_key) {
		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));

		delete_at(n, index);
		keys[last_level] = k + 1ull;
	} else
		r = -ENODATA;

out:
	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *first_key, uint64_t end_key,
			   dm_block_t *new_root, unsigned *nr_removed)
{
	int r;

	*nr_removed = 0;
	do {
		r = remove_one(info, root, first_key, end_key, &root, nr_removed);
		if (!r)
			(*nr_removed)++;
	} while (!r && *nr_removed < end_key - *first_key);

	*new_root = root;
	return r == -ENODATA ? 0 : r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove_leaves);
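/*
 * Usage sketch (hypothetical caller): remove all leaf entries with keys
 * in [100, 200) from a single-level btree:
 *
 *	uint64_t first = 100;
 *	unsigned nr_removed;
 *	dm_block_t new_root;
 *	int r;
 *
 *	r = dm_btree_remove_leaves(info, old_root, &first, 200,
 *				   &new_root, &nr_removed);
 *
 * On return 'first' has been advanced past every removed key and
 * nr_removed counts the deleted entries.
 */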