1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
10 #include <linux/error-injection.h>
13 #include "transaction.h"
14 #include "print-tree.h"
18 #include "tree-mod-log.h"
20 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
21 *root, struct btrfs_path *path, int level);
22 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
23 const struct btrfs_key *ins_key, struct btrfs_path *path,
24 int data_size, int extend);
25 static int push_node_left(struct btrfs_trans_handle *trans,
26 struct extent_buffer *dst,
27 struct extent_buffer *src, int empty);
28 static int balance_node_right(struct btrfs_trans_handle *trans,
29 struct extent_buffer *dst_buf,
30 struct extent_buffer *src_buf);
31 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
34 static const struct btrfs_csums {
37 const char driver[12];
39 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
40 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
41 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
42 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
43 .driver = "blake2b-256" },
46 int btrfs_super_csum_size(const struct btrfs_super_block *s)
48 u16 t = btrfs_super_csum_type(s);
50 * csum type is validated at mount time
52 return btrfs_csums[t].size;
55 const char *btrfs_super_csum_name(u16 csum_type)
57 /* csum type is validated at mount time */
58 return btrfs_csums[csum_type].name;
62 * Return driver name if defined, otherwise the name that's also a valid driver
65 const char *btrfs_super_csum_driver(u16 csum_type)
67 /* csum type is validated at mount time */
68 return btrfs_csums[csum_type].driver[0] ?
69 btrfs_csums[csum_type].driver :
70 btrfs_csums[csum_type].name;
73 size_t __attribute_const__ btrfs_get_num_csums(void)
75 return ARRAY_SIZE(btrfs_csums);
78 struct btrfs_path *btrfs_alloc_path(void)
80 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
83 /* this also releases the path */
84 void btrfs_free_path(struct btrfs_path *p)
88 btrfs_release_path(p);
89 kmem_cache_free(btrfs_path_cachep, p);
93 * path release drops references on the extent buffers in the path
94 * and it drops any locks held by this path
96 * It is safe to call this on paths that no locks or extent buffers held.
98 noinline void btrfs_release_path(struct btrfs_path *p)
102 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
107 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
110 free_extent_buffer(p->nodes[i]);
116 * safely gets a reference on the root node of a tree. A lock
117 * is not taken, so a concurrent writer may put a different node
118 * at the root of the tree. See btrfs_lock_root_node for the
121 * The extent buffer returned by this has a reference taken, so
122 * it won't disappear. It may stop being the root of the tree
123 * at any time because there are no locks held.
125 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
127 struct extent_buffer *eb;
131 eb = rcu_dereference(root->node);
134 * RCU really hurts here, we could free up the root node because
135 * it was COWed but we may not get the new root node yet so do
136 * the inc_not_zero dance and if it doesn't work then
137 * synchronize_rcu and try again.
139 if (atomic_inc_not_zero(&eb->refs)) {
150 * Cowonly root (not-shareable trees, everything not subvolume or reloc roots),
151 * just get put onto a simple dirty list. Transaction walks this list to make
152 * sure they get properly updated on disk.
154 static void add_root_to_dirty_list(struct btrfs_root *root)
156 struct btrfs_fs_info *fs_info = root->fs_info;
158 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
159 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
162 spin_lock(&fs_info->trans_lock);
163 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
164 /* Want the extent tree to be the last on the list */
165 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
166 list_move_tail(&root->dirty_list,
167 &fs_info->dirty_cowonly_roots);
169 list_move(&root->dirty_list,
170 &fs_info->dirty_cowonly_roots);
172 spin_unlock(&fs_info->trans_lock);
176 * used by snapshot creation to make a copy of a root for a tree with
177 * a given objectid. The buffer with the new root node is returned in
178 * cow_ret, and this func returns zero on success or a negative error code.
180 int btrfs_copy_root(struct btrfs_trans_handle *trans,
181 struct btrfs_root *root,
182 struct extent_buffer *buf,
183 struct extent_buffer **cow_ret, u64 new_root_objectid)
185 struct btrfs_fs_info *fs_info = root->fs_info;
186 struct extent_buffer *cow;
189 struct btrfs_disk_key disk_key;
191 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
192 trans->transid != fs_info->running_transaction->transid);
193 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
194 trans->transid != root->last_trans);
196 level = btrfs_header_level(buf);
198 btrfs_item_key(buf, &disk_key, 0);
200 btrfs_node_key(buf, &disk_key, 0);
202 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
203 &disk_key, level, buf->start, 0,
204 BTRFS_NESTING_NEW_ROOT);
208 copy_extent_buffer_full(cow, buf);
209 btrfs_set_header_bytenr(cow, cow->start);
210 btrfs_set_header_generation(cow, trans->transid);
211 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
212 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
213 BTRFS_HEADER_FLAG_RELOC);
214 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
215 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
217 btrfs_set_header_owner(cow, new_root_objectid);
219 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
221 WARN_ON(btrfs_header_generation(buf) > trans->transid);
222 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
223 ret = btrfs_inc_ref(trans, root, cow, 1);
225 ret = btrfs_inc_ref(trans, root, cow, 0);
227 btrfs_tree_unlock(cow);
228 free_extent_buffer(cow);
229 btrfs_abort_transaction(trans, ret);
233 btrfs_mark_buffer_dirty(cow);
239 * check if the tree block can be shared by multiple trees
241 int btrfs_block_can_be_shared(struct btrfs_root *root,
242 struct extent_buffer *buf)
245 * Tree blocks not in shareable trees and tree roots are never shared.
246 * If a block was allocated after the last snapshot and the block was
247 * not allocated by tree relocation, we know the block is not shared.
249 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
250 buf != root->node && buf != root->commit_root &&
251 (btrfs_header_generation(buf) <=
252 btrfs_root_last_snapshot(&root->root_item) ||
253 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
259 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
260 struct btrfs_root *root,
261 struct extent_buffer *buf,
262 struct extent_buffer *cow,
265 struct btrfs_fs_info *fs_info = root->fs_info;
273 * Backrefs update rules:
275 * Always use full backrefs for extent pointers in tree block
276 * allocated by tree relocation.
278 * If a shared tree block is no longer referenced by its owner
279 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
280 * use full backrefs for extent pointers in tree block.
282 * If a tree block is been relocating
283 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
284 * use full backrefs for extent pointers in tree block.
285 * The reason for this is some operations (such as drop tree)
286 * are only allowed for blocks use full backrefs.
289 if (btrfs_block_can_be_shared(root, buf)) {
290 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
291 btrfs_header_level(buf), 1,
297 btrfs_handle_fs_error(fs_info, ret, NULL);
302 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
303 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
304 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
309 owner = btrfs_header_owner(buf);
310 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
311 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
314 if ((owner == root->root_key.objectid ||
315 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
316 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
317 ret = btrfs_inc_ref(trans, root, buf, 1);
321 if (root->root_key.objectid ==
322 BTRFS_TREE_RELOC_OBJECTID) {
323 ret = btrfs_dec_ref(trans, root, buf, 0);
326 ret = btrfs_inc_ref(trans, root, cow, 1);
330 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
333 if (root->root_key.objectid ==
334 BTRFS_TREE_RELOC_OBJECTID)
335 ret = btrfs_inc_ref(trans, root, cow, 1);
337 ret = btrfs_inc_ref(trans, root, cow, 0);
341 if (new_flags != 0) {
342 int level = btrfs_header_level(buf);
344 ret = btrfs_set_disk_extent_flags(trans, buf,
345 new_flags, level, 0);
350 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
351 if (root->root_key.objectid ==
352 BTRFS_TREE_RELOC_OBJECTID)
353 ret = btrfs_inc_ref(trans, root, cow, 1);
355 ret = btrfs_inc_ref(trans, root, cow, 0);
358 ret = btrfs_dec_ref(trans, root, buf, 1);
362 btrfs_clean_tree_block(buf);
369 * does the dirty work in cow of a single block. The parent block (if
370 * supplied) is updated to point to the new cow copy. The new buffer is marked
371 * dirty and returned locked. If you modify the block it needs to be marked
374 * search_start -- an allocation hint for the new block
376 * empty_size -- a hint that you plan on doing more cow. This is the size in
377 * bytes the allocator should try to find free next to the block it returns.
378 * This is just a hint and may be ignored by the allocator.
380 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
381 struct btrfs_root *root,
382 struct extent_buffer *buf,
383 struct extent_buffer *parent, int parent_slot,
384 struct extent_buffer **cow_ret,
385 u64 search_start, u64 empty_size,
386 enum btrfs_lock_nesting nest)
388 struct btrfs_fs_info *fs_info = root->fs_info;
389 struct btrfs_disk_key disk_key;
390 struct extent_buffer *cow;
394 u64 parent_start = 0;
399 btrfs_assert_tree_write_locked(buf);
401 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
402 trans->transid != fs_info->running_transaction->transid);
403 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
404 trans->transid != root->last_trans);
406 level = btrfs_header_level(buf);
409 btrfs_item_key(buf, &disk_key, 0);
411 btrfs_node_key(buf, &disk_key, 0);
413 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
414 parent_start = parent->start;
416 cow = btrfs_alloc_tree_block(trans, root, parent_start,
417 root->root_key.objectid, &disk_key, level,
418 search_start, empty_size, nest);
422 /* cow is set to blocking by btrfs_init_new_buffer */
424 copy_extent_buffer_full(cow, buf);
425 btrfs_set_header_bytenr(cow, cow->start);
426 btrfs_set_header_generation(cow, trans->transid);
427 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
428 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
429 BTRFS_HEADER_FLAG_RELOC);
430 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
431 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
433 btrfs_set_header_owner(cow, root->root_key.objectid);
435 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
437 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
439 btrfs_tree_unlock(cow);
440 free_extent_buffer(cow);
441 btrfs_abort_transaction(trans, ret);
445 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
446 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
448 btrfs_tree_unlock(cow);
449 free_extent_buffer(cow);
450 btrfs_abort_transaction(trans, ret);
455 if (buf == root->node) {
456 WARN_ON(parent && parent != buf);
457 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
458 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
459 parent_start = buf->start;
461 atomic_inc(&cow->refs);
462 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
464 rcu_assign_pointer(root->node, cow);
466 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
467 parent_start, last_ref);
468 free_extent_buffer(buf);
469 add_root_to_dirty_list(root);
471 WARN_ON(trans->transid != btrfs_header_generation(parent));
472 btrfs_tree_mod_log_insert_key(parent, parent_slot,
473 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
474 btrfs_set_node_blockptr(parent, parent_slot,
476 btrfs_set_node_ptr_generation(parent, parent_slot,
478 btrfs_mark_buffer_dirty(parent);
480 ret = btrfs_tree_mod_log_free_eb(buf);
482 btrfs_tree_unlock(cow);
483 free_extent_buffer(cow);
484 btrfs_abort_transaction(trans, ret);
488 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
489 parent_start, last_ref);
492 btrfs_tree_unlock(buf);
493 free_extent_buffer_stale(buf);
494 btrfs_mark_buffer_dirty(cow);
499 static inline int should_cow_block(struct btrfs_trans_handle *trans,
500 struct btrfs_root *root,
501 struct extent_buffer *buf)
503 if (btrfs_is_testing(root->fs_info))
506 /* Ensure we can see the FORCE_COW bit */
507 smp_mb__before_atomic();
510 * We do not need to cow a block if
511 * 1) this block is not created or changed in this transaction;
512 * 2) this block does not belong to TREE_RELOC tree;
513 * 3) the root is not forced COW.
515 * What is forced COW:
516 * when we create snapshot during committing the transaction,
517 * after we've finished copying src root, we must COW the shared
518 * block to ensure the metadata consistency.
520 if (btrfs_header_generation(buf) == trans->transid &&
521 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
522 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
523 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
524 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
530 * cows a single block, see __btrfs_cow_block for the real work.
531 * This version of it has extra checks so that a block isn't COWed more than
532 * once per transaction, as long as it hasn't been written yet
534 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
535 struct btrfs_root *root, struct extent_buffer *buf,
536 struct extent_buffer *parent, int parent_slot,
537 struct extent_buffer **cow_ret,
538 enum btrfs_lock_nesting nest)
540 struct btrfs_fs_info *fs_info = root->fs_info;
544 if (test_bit(BTRFS_ROOT_DELETING, &root->state))
546 "COW'ing blocks on a fs root that's being dropped");
548 if (trans->transaction != fs_info->running_transaction)
549 WARN(1, KERN_CRIT "trans %llu running %llu\n",
551 fs_info->running_transaction->transid);
553 if (trans->transid != fs_info->generation)
554 WARN(1, KERN_CRIT "trans %llu running %llu\n",
555 trans->transid, fs_info->generation);
557 if (!should_cow_block(trans, root, buf)) {
562 search_start = buf->start & ~((u64)SZ_1G - 1);
565 * Before CoWing this block for later modification, check if it's
566 * the subtree root and do the delayed subtree trace if needed.
568 * Also We don't care about the error, as it's handled internally.
570 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
571 ret = __btrfs_cow_block(trans, root, buf, parent,
572 parent_slot, cow_ret, search_start, 0, nest);
574 trace_btrfs_cow_block(root, buf, *cow_ret);
578 ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
581 * helper function for defrag to decide if two blocks pointed to by a
582 * node are actually close by
584 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
586 if (blocknr < other && other - (blocknr + blocksize) < 32768)
588 if (blocknr > other && blocknr - (other + blocksize) < 32768)
593 #ifdef __LITTLE_ENDIAN
596 * Compare two keys, on little-endian the disk order is same as CPU order and
597 * we can avoid the conversion.
599 static int comp_keys(const struct btrfs_disk_key *disk_key,
600 const struct btrfs_key *k2)
602 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
604 return btrfs_comp_cpu_keys(k1, k2);
610 * compare two keys in a memcmp fashion
612 static int comp_keys(const struct btrfs_disk_key *disk,
613 const struct btrfs_key *k2)
617 btrfs_disk_key_to_cpu(&k1, disk);
619 return btrfs_comp_cpu_keys(&k1, k2);
624 * same as comp_keys only with two btrfs_key's
626 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
628 if (k1->objectid > k2->objectid)
630 if (k1->objectid < k2->objectid)
632 if (k1->type > k2->type)
634 if (k1->type < k2->type)
636 if (k1->offset > k2->offset)
638 if (k1->offset < k2->offset)
644 * this is used by the defrag code to go through all the
645 * leaves pointed to by a node and reallocate them so that
646 * disk order is close to key order
648 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
649 struct btrfs_root *root, struct extent_buffer *parent,
650 int start_slot, u64 *last_ret,
651 struct btrfs_key *progress)
653 struct btrfs_fs_info *fs_info = root->fs_info;
654 struct extent_buffer *cur;
656 u64 search_start = *last_ret;
664 int progress_passed = 0;
665 struct btrfs_disk_key disk_key;
667 WARN_ON(trans->transaction != fs_info->running_transaction);
668 WARN_ON(trans->transid != fs_info->generation);
670 parent_nritems = btrfs_header_nritems(parent);
671 blocksize = fs_info->nodesize;
672 end_slot = parent_nritems - 1;
674 if (parent_nritems <= 1)
677 for (i = start_slot; i <= end_slot; i++) {
680 btrfs_node_key(parent, &disk_key, i);
681 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
685 blocknr = btrfs_node_blockptr(parent, i);
687 last_block = blocknr;
690 other = btrfs_node_blockptr(parent, i - 1);
691 close = close_blocks(blocknr, other, blocksize);
693 if (!close && i < end_slot) {
694 other = btrfs_node_blockptr(parent, i + 1);
695 close = close_blocks(blocknr, other, blocksize);
698 last_block = blocknr;
702 cur = btrfs_read_node_slot(parent, i);
705 if (search_start == 0)
706 search_start = last_block;
708 btrfs_tree_lock(cur);
709 err = __btrfs_cow_block(trans, root, cur, parent, i,
712 (end_slot - i) * blocksize),
715 btrfs_tree_unlock(cur);
716 free_extent_buffer(cur);
719 search_start = cur->start;
720 last_block = cur->start;
721 *last_ret = search_start;
722 btrfs_tree_unlock(cur);
723 free_extent_buffer(cur);
729 * Search for a key in the given extent_buffer.
731 * The lower boundary for the search is specified by the slot number @low. Use a
732 * value of 0 to search over the whole extent buffer.
734 * The slot in the extent buffer is returned via @slot. If the key exists in the
735 * extent buffer, then @slot will point to the slot where the key is, otherwise
736 * it points to the slot where you would insert the key.
738 * Slot may point to the total number of items (i.e. one position beyond the last
739 * key) if the key is bigger than the last key in the extent buffer.
741 static noinline int generic_bin_search(struct extent_buffer *eb, int low,
742 const struct btrfs_key *key, int *slot)
746 int high = btrfs_header_nritems(eb);
748 const int key_size = sizeof(struct btrfs_disk_key);
751 btrfs_err(eb->fs_info,
752 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
753 __func__, low, high, eb->start,
754 btrfs_header_owner(eb), btrfs_header_level(eb));
758 if (btrfs_header_level(eb) == 0) {
759 p = offsetof(struct btrfs_leaf, items);
760 item_size = sizeof(struct btrfs_item);
762 p = offsetof(struct btrfs_node, ptrs);
763 item_size = sizeof(struct btrfs_key_ptr);
768 unsigned long offset;
769 struct btrfs_disk_key *tmp;
770 struct btrfs_disk_key unaligned;
773 mid = (low + high) / 2;
774 offset = p + mid * item_size;
775 oip = offset_in_page(offset);
777 if (oip + key_size <= PAGE_SIZE) {
778 const unsigned long idx = get_eb_page_index(offset);
779 char *kaddr = page_address(eb->pages[idx]);
781 oip = get_eb_offset_in_page(eb, offset);
782 tmp = (struct btrfs_disk_key *)(kaddr + oip);
784 read_extent_buffer(eb, &unaligned, offset, key_size);
788 ret = comp_keys(tmp, key);
804 * Simple binary search on an extent buffer. Works for both leaves and nodes, and
805 * always searches over the whole range of keys (slot 0 to slot 'nritems - 1').
807 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
810 return generic_bin_search(eb, 0, key, slot);
813 static void root_add_used(struct btrfs_root *root, u32 size)
815 spin_lock(&root->accounting_lock);
816 btrfs_set_root_used(&root->root_item,
817 btrfs_root_used(&root->root_item) + size);
818 spin_unlock(&root->accounting_lock);
821 static void root_sub_used(struct btrfs_root *root, u32 size)
823 spin_lock(&root->accounting_lock);
824 btrfs_set_root_used(&root->root_item,
825 btrfs_root_used(&root->root_item) - size);
826 spin_unlock(&root->accounting_lock);
829 /* given a node and slot number, this reads the blocks it points to. The
830 * extent buffer is returned with a reference taken (but unlocked).
832 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
835 int level = btrfs_header_level(parent);
836 struct extent_buffer *eb;
837 struct btrfs_key first_key;
839 if (slot < 0 || slot >= btrfs_header_nritems(parent))
840 return ERR_PTR(-ENOENT);
844 btrfs_node_key_to_cpu(parent, &first_key, slot);
845 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
846 btrfs_header_owner(parent),
847 btrfs_node_ptr_generation(parent, slot),
848 level - 1, &first_key);
851 if (!extent_buffer_uptodate(eb)) {
852 free_extent_buffer(eb);
853 return ERR_PTR(-EIO);
860 * node level balancing, used to make sure nodes are in proper order for
861 * item deletion. We balance from the top down, so we have to make sure
862 * that a deletion won't leave an node completely empty later on.
864 static noinline int balance_level(struct btrfs_trans_handle *trans,
865 struct btrfs_root *root,
866 struct btrfs_path *path, int level)
868 struct btrfs_fs_info *fs_info = root->fs_info;
869 struct extent_buffer *right = NULL;
870 struct extent_buffer *mid;
871 struct extent_buffer *left = NULL;
872 struct extent_buffer *parent = NULL;
876 int orig_slot = path->slots[level];
881 mid = path->nodes[level];
883 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
884 WARN_ON(btrfs_header_generation(mid) != trans->transid);
886 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
888 if (level < BTRFS_MAX_LEVEL - 1) {
889 parent = path->nodes[level + 1];
890 pslot = path->slots[level + 1];
894 * deal with the case where there is only one pointer in the root
895 * by promoting the node below to a root
898 struct extent_buffer *child;
900 if (btrfs_header_nritems(mid) != 1)
903 /* promote the child to a root */
904 child = btrfs_read_node_slot(mid, 0);
906 ret = PTR_ERR(child);
907 btrfs_handle_fs_error(fs_info, ret, NULL);
911 btrfs_tree_lock(child);
912 ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
915 btrfs_tree_unlock(child);
916 free_extent_buffer(child);
920 ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
922 rcu_assign_pointer(root->node, child);
924 add_root_to_dirty_list(root);
925 btrfs_tree_unlock(child);
927 path->locks[level] = 0;
928 path->nodes[level] = NULL;
929 btrfs_clean_tree_block(mid);
930 btrfs_tree_unlock(mid);
931 /* once for the path */
932 free_extent_buffer(mid);
934 root_sub_used(root, mid->len);
935 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
936 /* once for the root ptr */
937 free_extent_buffer_stale(mid);
940 if (btrfs_header_nritems(mid) >
941 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
944 left = btrfs_read_node_slot(parent, pslot - 1);
949 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
950 wret = btrfs_cow_block(trans, root, left,
951 parent, pslot - 1, &left,
952 BTRFS_NESTING_LEFT_COW);
959 right = btrfs_read_node_slot(parent, pslot + 1);
964 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
965 wret = btrfs_cow_block(trans, root, right,
966 parent, pslot + 1, &right,
967 BTRFS_NESTING_RIGHT_COW);
974 /* first, try to make some room in the middle buffer */
976 orig_slot += btrfs_header_nritems(left);
977 wret = push_node_left(trans, left, mid, 1);
983 * then try to empty the right most buffer into the middle
986 wret = push_node_left(trans, mid, right, 1);
987 if (wret < 0 && wret != -ENOSPC)
989 if (btrfs_header_nritems(right) == 0) {
990 btrfs_clean_tree_block(right);
991 btrfs_tree_unlock(right);
992 del_ptr(root, path, level + 1, pslot + 1);
993 root_sub_used(root, right->len);
994 btrfs_free_tree_block(trans, btrfs_root_id(root), right,
996 free_extent_buffer_stale(right);
999 struct btrfs_disk_key right_key;
1000 btrfs_node_key(right, &right_key, 0);
1001 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1002 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1004 btrfs_set_node_key(parent, &right_key, pslot + 1);
1005 btrfs_mark_buffer_dirty(parent);
1008 if (btrfs_header_nritems(mid) == 1) {
1010 * we're not allowed to leave a node with one item in the
1011 * tree during a delete. A deletion from lower in the tree
1012 * could try to delete the only pointer in this node.
1013 * So, pull some keys from the left.
1014 * There has to be a left pointer at this point because
1015 * otherwise we would have pulled some pointers from the
1020 btrfs_handle_fs_error(fs_info, ret, NULL);
1023 wret = balance_node_right(trans, mid, left);
1029 wret = push_node_left(trans, left, mid, 1);
1035 if (btrfs_header_nritems(mid) == 0) {
1036 btrfs_clean_tree_block(mid);
1037 btrfs_tree_unlock(mid);
1038 del_ptr(root, path, level + 1, pslot);
1039 root_sub_used(root, mid->len);
1040 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1041 free_extent_buffer_stale(mid);
1044 /* update the parent key to reflect our changes */
1045 struct btrfs_disk_key mid_key;
1046 btrfs_node_key(mid, &mid_key, 0);
1047 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1048 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1050 btrfs_set_node_key(parent, &mid_key, pslot);
1051 btrfs_mark_buffer_dirty(parent);
1054 /* update the path */
1056 if (btrfs_header_nritems(left) > orig_slot) {
1057 atomic_inc(&left->refs);
1058 /* left was locked after cow */
1059 path->nodes[level] = left;
1060 path->slots[level + 1] -= 1;
1061 path->slots[level] = orig_slot;
1063 btrfs_tree_unlock(mid);
1064 free_extent_buffer(mid);
1067 orig_slot -= btrfs_header_nritems(left);
1068 path->slots[level] = orig_slot;
1071 /* double check we haven't messed things up */
1073 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1077 btrfs_tree_unlock(right);
1078 free_extent_buffer(right);
1081 if (path->nodes[level] != left)
1082 btrfs_tree_unlock(left);
1083 free_extent_buffer(left);
1088 /* Node balancing for insertion. Here we only split or push nodes around
1089 * when they are completely full. This is also done top down, so we
1090 * have to be pessimistic.
1092 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1093 struct btrfs_root *root,
1094 struct btrfs_path *path, int level)
1096 struct btrfs_fs_info *fs_info = root->fs_info;
1097 struct extent_buffer *right = NULL;
1098 struct extent_buffer *mid;
1099 struct extent_buffer *left = NULL;
1100 struct extent_buffer *parent = NULL;
1104 int orig_slot = path->slots[level];
1109 mid = path->nodes[level];
1110 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1112 if (level < BTRFS_MAX_LEVEL - 1) {
1113 parent = path->nodes[level + 1];
1114 pslot = path->slots[level + 1];
1120 left = btrfs_read_node_slot(parent, pslot - 1);
1124 /* first, try to make some room in the middle buffer */
1128 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1130 left_nr = btrfs_header_nritems(left);
1131 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1134 ret = btrfs_cow_block(trans, root, left, parent,
1136 BTRFS_NESTING_LEFT_COW);
1140 wret = push_node_left(trans, left, mid, 0);
1146 struct btrfs_disk_key disk_key;
1147 orig_slot += left_nr;
1148 btrfs_node_key(mid, &disk_key, 0);
1149 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1150 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1152 btrfs_set_node_key(parent, &disk_key, pslot);
1153 btrfs_mark_buffer_dirty(parent);
1154 if (btrfs_header_nritems(left) > orig_slot) {
1155 path->nodes[level] = left;
1156 path->slots[level + 1] -= 1;
1157 path->slots[level] = orig_slot;
1158 btrfs_tree_unlock(mid);
1159 free_extent_buffer(mid);
1162 btrfs_header_nritems(left);
1163 path->slots[level] = orig_slot;
1164 btrfs_tree_unlock(left);
1165 free_extent_buffer(left);
1169 btrfs_tree_unlock(left);
1170 free_extent_buffer(left);
1172 right = btrfs_read_node_slot(parent, pslot + 1);
1177 * then try to empty the right most buffer into the middle
1182 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1184 right_nr = btrfs_header_nritems(right);
1185 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1188 ret = btrfs_cow_block(trans, root, right,
1190 &right, BTRFS_NESTING_RIGHT_COW);
1194 wret = balance_node_right(trans, right, mid);
1200 struct btrfs_disk_key disk_key;
1202 btrfs_node_key(right, &disk_key, 0);
1203 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1204 BTRFS_MOD_LOG_KEY_REPLACE, GFP_NOFS);
1206 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1207 btrfs_mark_buffer_dirty(parent);
1209 if (btrfs_header_nritems(mid) <= orig_slot) {
1210 path->nodes[level] = right;
1211 path->slots[level + 1] += 1;
1212 path->slots[level] = orig_slot -
1213 btrfs_header_nritems(mid);
1214 btrfs_tree_unlock(mid);
1215 free_extent_buffer(mid);
1217 btrfs_tree_unlock(right);
1218 free_extent_buffer(right);
1222 btrfs_tree_unlock(right);
1223 free_extent_buffer(right);
1229 * readahead one full node of leaves, finding things that are close
1230 * to the block in 'slot', and triggering ra on them.
1232 static void reada_for_search(struct btrfs_fs_info *fs_info,
1233 struct btrfs_path *path,
1234 int level, int slot, u64 objectid)
1236 struct extent_buffer *node;
1237 struct btrfs_disk_key disk_key;
1247 if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
1250 if (!path->nodes[level])
1253 node = path->nodes[level];
1256 * Since the time between visiting leaves is much shorter than the time
1257 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
1258 * much IO at once (possibly random).
1260 if (path->reada == READA_FORWARD_ALWAYS) {
1262 nread_max = node->fs_info->nodesize;
1264 nread_max = SZ_128K;
1269 search = btrfs_node_blockptr(node, slot);
1270 blocksize = fs_info->nodesize;
1271 if (path->reada != READA_FORWARD_ALWAYS) {
1272 struct extent_buffer *eb;
1274 eb = find_extent_buffer(fs_info, search);
1276 free_extent_buffer(eb);
1283 nritems = btrfs_header_nritems(node);
1287 if (path->reada == READA_BACK) {
1291 } else if (path->reada == READA_FORWARD ||
1292 path->reada == READA_FORWARD_ALWAYS) {
1297 if (path->reada == READA_BACK && objectid) {
1298 btrfs_node_key(node, &disk_key, nr);
1299 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1302 search = btrfs_node_blockptr(node, nr);
1303 if (path->reada == READA_FORWARD_ALWAYS ||
1304 (search <= target && target - search <= 65536) ||
1305 (search > target && search - target <= 65536)) {
1306 btrfs_readahead_node_child(node, nr);
1310 if (nread > nread_max || nscan > 32)
1315 static noinline void reada_for_balance(struct btrfs_path *path, int level)
1317 struct extent_buffer *parent;
1321 parent = path->nodes[level + 1];
1325 nritems = btrfs_header_nritems(parent);
1326 slot = path->slots[level + 1];
1329 btrfs_readahead_node_child(parent, slot - 1);
1330 if (slot + 1 < nritems)
1331 btrfs_readahead_node_child(parent, slot + 1);
1336 * when we walk down the tree, it is usually safe to unlock the higher layers
1337 * in the tree. The exceptions are when our path goes through slot 0, because
1338 * operations on the tree might require changing key pointers higher up in the
1341 * callers might also have set path->keep_locks, which tells this code to keep
1342 * the lock if the path points to the last slot in the block. This is part of
1343 * walking through the tree, and selecting the next slot in the higher block.
1345 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1346 * if lowest_unlock is 1, level 0 won't be unlocked
1348 static noinline void unlock_up(struct btrfs_path *path, int level,
1349 int lowest_unlock, int min_write_lock_level,
1350 int *write_lock_level)
1353 int skip_level = level;
1354 bool check_skip = true;
1356 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1357 if (!path->nodes[i])
1359 if (!path->locks[i])
1363 if (path->slots[i] == 0) {
1368 if (path->keep_locks) {
1371 nritems = btrfs_header_nritems(path->nodes[i]);
1372 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1379 if (i >= lowest_unlock && i > skip_level) {
1381 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
1383 if (write_lock_level &&
1384 i > min_write_lock_level &&
1385 i <= *write_lock_level) {
1386 *write_lock_level = i - 1;
1393 * helper function for btrfs_search_slot. The goal is to find a block
1394 * in cache without setting the path to blocking. If we find the block
1395 * we return zero and the path is unchanged.
1397 * If we can't find the block, we set the path blocking and do some
1398 * reada. -EAGAIN is returned and the search must be repeated.
1401 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
1402 struct extent_buffer **eb_ret, int level, int slot,
1403 const struct btrfs_key *key)
1405 struct btrfs_fs_info *fs_info = root->fs_info;
1408 struct extent_buffer *tmp;
1409 struct btrfs_key first_key;
1413 blocknr = btrfs_node_blockptr(*eb_ret, slot);
1414 gen = btrfs_node_ptr_generation(*eb_ret, slot);
1415 parent_level = btrfs_header_level(*eb_ret);
1416 btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);
1418 tmp = find_extent_buffer(fs_info, blocknr);
1420 if (p->reada == READA_FORWARD_ALWAYS)
1421 reada_for_search(fs_info, p, level, slot, key->objectid);
1423 /* first we do an atomic uptodate check */
1424 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1426 * Do extra check for first_key, eb can be stale due to
1427 * being cached, read from scrub, or have multiple
1428 * parents (shared tree blocks).
1430 if (btrfs_verify_level_key(tmp,
1431 parent_level - 1, &first_key, gen)) {
1432 free_extent_buffer(tmp);
1439 /* now we're allowed to do a blocking uptodate check */
1440 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
1442 free_extent_buffer(tmp);
1443 btrfs_release_path(p);
1451 * reduce lock contention at high levels
1452 * of the btree by dropping locks before
1453 * we read. Don't release the lock on the current
1454 * level because we need to walk this node to figure
1455 * out which blocks to read.
1457 btrfs_unlock_up_safe(p, level + 1);
1459 if (p->reada != READA_NONE)
1460 reada_for_search(fs_info, p, level, slot, key->objectid);
1463 tmp = read_tree_block(fs_info, blocknr, root->root_key.objectid,
1464 gen, parent_level - 1, &first_key);
1466 btrfs_release_path(p);
1467 return PTR_ERR(tmp);
1470 * If the read above didn't mark this buffer up to date,
1471 * it will never end up being up to date. Set ret to EIO now
1472 * and give up so that our caller doesn't loop forever
1475 if (!extent_buffer_uptodate(tmp))
1477 free_extent_buffer(tmp);
1479 btrfs_release_path(p);
1484 * helper function for btrfs_search_slot. This does all of the checks
1485 * for node-level blocks and does any balancing required based on
1488 * If no extra work was required, zero is returned. If we had to
1489 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1493 setup_nodes_for_search(struct btrfs_trans_handle *trans,
1494 struct btrfs_root *root, struct btrfs_path *p,
1495 struct extent_buffer *b, int level, int ins_len,
1496 int *write_lock_level)
1498 struct btrfs_fs_info *fs_info = root->fs_info;
1501 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1502 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
1504 if (*write_lock_level < level + 1) {
1505 *write_lock_level = level + 1;
1506 btrfs_release_path(p);
1510 reada_for_balance(p, level);
1511 ret = split_node(trans, root, p, level);
1513 b = p->nodes[level];
1514 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1515 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
1517 if (*write_lock_level < level + 1) {
1518 *write_lock_level = level + 1;
1519 btrfs_release_path(p);
1523 reada_for_balance(p, level);
1524 ret = balance_level(trans, root, p, level);
1528 b = p->nodes[level];
1530 btrfs_release_path(p);
1533 BUG_ON(btrfs_header_nritems(b) == 1);
1538 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1539 u64 iobjectid, u64 ioff, u8 key_type,
1540 struct btrfs_key *found_key)
1543 struct btrfs_key key;
1544 struct extent_buffer *eb;
1549 key.type = key_type;
1550 key.objectid = iobjectid;
1553 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1557 eb = path->nodes[0];
1558 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1559 ret = btrfs_next_leaf(fs_root, path);
1562 eb = path->nodes[0];
1565 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1566 if (found_key->type != key.type ||
1567 found_key->objectid != key.objectid)
1573 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1574 struct btrfs_path *p,
1575 int write_lock_level)
1577 struct extent_buffer *b;
1581 if (p->search_commit_root) {
1582 b = root->commit_root;
1583 atomic_inc(&b->refs);
1584 level = btrfs_header_level(b);
1586 * Ensure that all callers have set skip_locking when
1587 * p->search_commit_root = 1.
1589 ASSERT(p->skip_locking == 1);
1594 if (p->skip_locking) {
1595 b = btrfs_root_node(root);
1596 level = btrfs_header_level(b);
1600 /* We try very hard to do read locks on the root */
1601 root_lock = BTRFS_READ_LOCK;
1604 * If the level is set to maximum, we can skip trying to get the read
1607 if (write_lock_level < BTRFS_MAX_LEVEL) {
1609 * We don't know the level of the root node until we actually
1610 * have it read locked
1612 b = btrfs_read_lock_root_node(root);
1613 level = btrfs_header_level(b);
1614 if (level > write_lock_level)
1617 /* Whoops, must trade for write lock */
1618 btrfs_tree_read_unlock(b);
1619 free_extent_buffer(b);
1622 b = btrfs_lock_root_node(root);
1623 root_lock = BTRFS_WRITE_LOCK;
1625 /* The level might have changed, check again */
1626 level = btrfs_header_level(b);
1630 * The root may have failed to write out at some point, and thus is no
1631 * longer valid, return an error in this case.
1633 if (!extent_buffer_uptodate(b)) {
1635 btrfs_tree_unlock_rw(b, root_lock);
1636 free_extent_buffer(b);
1637 return ERR_PTR(-EIO);
1640 p->nodes[level] = b;
1641 if (!p->skip_locking)
1642 p->locks[level] = root_lock;
1644 * Callers are responsible for dropping b's references.
1650 * Replace the extent buffer at the lowest level of the path with a cloned
1651 * version. The purpose is to be able to use it safely, after releasing the
1652 * commit root semaphore, even if relocation is happening in parallel, the
1653 * transaction used for relocation is committed and the extent buffer is
1654 * reallocated in the next transaction.
1656 * This is used in a context where the caller does not prevent transaction
1657 * commits from happening, either by holding a transaction handle or holding
1658 * some lock, while it's doing searches through a commit root.
1659 * At the moment it's only used for send operations.
1661 static int finish_need_commit_sem_search(struct btrfs_path *path)
1663 const int i = path->lowest_level;
1664 const int slot = path->slots[i];
1665 struct extent_buffer *lowest = path->nodes[i];
1666 struct extent_buffer *clone;
1668 ASSERT(path->need_commit_sem);
1673 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);
1675 clone = btrfs_clone_extent_buffer(lowest);
1679 btrfs_release_path(path);
1680 path->nodes[i] = clone;
1681 path->slots[i] = slot;
1686 static inline int search_for_key_slot(struct extent_buffer *eb,
1687 int search_low_slot,
1688 const struct btrfs_key *key,
1693 * If a previous call to btrfs_bin_search() on a parent node returned an
1694 * exact match (prev_cmp == 0), we can safely assume the target key will
1695 * always be at slot 0 on lower levels, since each key pointer
1696 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
1697 * subtree it points to. Thus we can skip searching lower levels.
1699 if (prev_cmp == 0) {
1704 return generic_bin_search(eb, search_low_slot, key, slot);
1707 static int search_leaf(struct btrfs_trans_handle *trans,
1708 struct btrfs_root *root,
1709 const struct btrfs_key *key,
1710 struct btrfs_path *path,
1714 struct extent_buffer *leaf = path->nodes[0];
1715 int leaf_free_space = -1;
1716 int search_low_slot = 0;
1718 bool do_bin_search = true;
1721 * If we are doing an insertion, the leaf has enough free space and the
1722 * destination slot for the key is not slot 0, then we can unlock our
1723 * write lock on the parent, and any other upper nodes, before doing the
1724 * binary search on the leaf (with search_for_key_slot()), allowing other
1725 * tasks to lock the parent and any other upper nodes.
1729 * Cache the leaf free space, since we will need it later and it
1730 * will not change until then.
1732 leaf_free_space = btrfs_leaf_free_space(leaf);
1735 * !path->locks[1] means we have a single node tree, the leaf is
1736 * the root of the tree.
1738 if (path->locks[1] && leaf_free_space >= ins_len) {
1739 struct btrfs_disk_key first_key;
1741 ASSERT(btrfs_header_nritems(leaf) > 0);
1742 btrfs_item_key(leaf, &first_key, 0);
1745 * Doing the extra comparison with the first key is cheap,
1746 * taking into account that the first key is very likely
1747 * already in a cache line because it immediately follows
1748 * the extent buffer's header and we have recently accessed
1749 * the header's level field.
1751 ret = comp_keys(&first_key, key);
1754 * The first key is smaller than the key we want
1755 * to insert, so we are safe to unlock all upper
1756 * nodes and we have to do the binary search.
1758 * We do use btrfs_unlock_up_safe() and not
1759 * unlock_up() because the later does not unlock
1760 * nodes with a slot of 0 - we can safely unlock
1761 * any node even if its slot is 0 since in this
1762 * case the key does not end up at slot 0 of the
1763 * leaf and there's no need to split the leaf.
1765 btrfs_unlock_up_safe(path, 1);
1766 search_low_slot = 1;
1769 * The first key is >= then the key we want to
1770 * insert, so we can skip the binary search as
1771 * the target key will be at slot 0.
1773 * We can not unlock upper nodes when the key is
1774 * less than the first key, because we will need
1775 * to update the key at slot 0 of the parent node
1776 * and possibly of other upper nodes too.
1777 * If the key matches the first key, then we can
1778 * unlock all the upper nodes, using
1779 * btrfs_unlock_up_safe() instead of unlock_up()
1783 btrfs_unlock_up_safe(path, 1);
1785 * ret is already 0 or 1, matching the result of
1786 * a btrfs_bin_search() call, so there is no need
1789 do_bin_search = false;
1795 if (do_bin_search) {
1796 ret = search_for_key_slot(leaf, search_low_slot, key,
1797 prev_cmp, &path->slots[0]);
1804 * Item key already exists. In this case, if we are allowed to
1805 * insert the item (for example, in dir_item case, item key
1806 * collision is allowed), it will be merged with the original
1807 * item. Only the item size grows, no new btrfs item will be
1808 * added. If search_for_extension is not set, ins_len already
1809 * accounts the size btrfs_item, deduct it here so leaf space
1810 * check will be correct.
1812 if (ret == 0 && !path->search_for_extension) {
1813 ASSERT(ins_len >= sizeof(struct btrfs_item));
1814 ins_len -= sizeof(struct btrfs_item);
1817 ASSERT(leaf_free_space >= 0);
1819 if (leaf_free_space < ins_len) {
1822 err = split_leaf(trans, root, key, path, ins_len,
1825 if (WARN_ON(err > 0))
1836 * btrfs_search_slot - look for a key in a tree and perform necessary
1837 * modifications to preserve tree invariants.
1839 * @trans: Handle of transaction, used when modifying the tree
1840 * @p: Holds all btree nodes along the search path
1841 * @root: The root node of the tree
1842 * @key: The key we are looking for
1843 * @ins_len: Indicates purpose of search:
1844 * >0 for inserts it's size of item inserted (*)
1846 * 0 for plain searches, not modifying the tree
1848 * (*) If size of item inserted doesn't include
1849 * sizeof(struct btrfs_item), then p->search_for_extension must
1851 * @cow: boolean should CoW operations be performed. Must always be 1
1852 * when modifying the tree.
1854 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
1855 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
1857 * If @key is found, 0 is returned and you can find the item in the leaf level
1858 * of the path (level 0)
1860 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
1861 * points to the slot where it should be inserted
1863 * If an error is encountered while searching the tree a negative error number
1866 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1867 const struct btrfs_key *key, struct btrfs_path *p,
1868 int ins_len, int cow)
1870 struct btrfs_fs_info *fs_info = root->fs_info;
1871 struct extent_buffer *b;
1876 int lowest_unlock = 1;
1877 /* everything at write_lock_level or lower must be write locked */
1878 int write_lock_level = 0;
1879 u8 lowest_level = 0;
1880 int min_write_lock_level;
1883 lowest_level = p->lowest_level;
1884 WARN_ON(lowest_level && ins_len > 0);
1885 WARN_ON(p->nodes[0] != NULL);
1886 BUG_ON(!cow && ins_len);
1891 /* when we are removing items, we might have to go up to level
1892 * two as we update tree pointers Make sure we keep write
1893 * for those levels as well
1895 write_lock_level = 2;
1896 } else if (ins_len > 0) {
1898 * for inserting items, make sure we have a write lock on
1899 * level 1 so we can update keys
1901 write_lock_level = 1;
1905 write_lock_level = -1;
1907 if (cow && (p->keep_locks || p->lowest_level))
1908 write_lock_level = BTRFS_MAX_LEVEL;
1910 min_write_lock_level = write_lock_level;
1912 if (p->need_commit_sem) {
1913 ASSERT(p->search_commit_root);
1914 down_read(&fs_info->commit_root_sem);
1919 b = btrfs_search_slot_get_root(root, p, write_lock_level);
1928 level = btrfs_header_level(b);
1931 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
1934 * if we don't really need to cow this block
1935 * then we don't want to set the path blocking,
1936 * so we test it here
1938 if (!should_cow_block(trans, root, b))
1942 * must have write locks on this node and the
1945 if (level > write_lock_level ||
1946 (level + 1 > write_lock_level &&
1947 level + 1 < BTRFS_MAX_LEVEL &&
1948 p->nodes[level + 1])) {
1949 write_lock_level = level + 1;
1950 btrfs_release_path(p);
1955 err = btrfs_cow_block(trans, root, b, NULL, 0,
1959 err = btrfs_cow_block(trans, root, b,
1960 p->nodes[level + 1],
1961 p->slots[level + 1], &b,
1969 p->nodes[level] = b;
1972 * we have a lock on b and as long as we aren't changing
1973 * the tree, there is no way to for the items in b to change.
1974 * It is safe to drop the lock on our parent before we
1975 * go through the expensive btree search on b.
1977 * If we're inserting or deleting (ins_len != 0), then we might
1978 * be changing slot zero, which may require changing the parent.
1979 * So, we can't drop the lock until after we know which slot
1980 * we're operating on.
1982 if (!ins_len && !p->keep_locks) {
1985 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
1986 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
1993 ASSERT(write_lock_level >= 1);
1995 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
1996 if (!p->search_for_split)
1997 unlock_up(p, level, lowest_unlock,
1998 min_write_lock_level, NULL);
2002 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
2007 if (ret && slot > 0) {
2011 p->slots[level] = slot;
2012 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2020 b = p->nodes[level];
2021 slot = p->slots[level];
2024 * Slot 0 is special, if we change the key we have to update
2025 * the parent pointer which means we must have a write lock on
2028 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2029 write_lock_level = level + 1;
2030 btrfs_release_path(p);
2034 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2037 if (level == lowest_level) {
2043 err = read_block_for_search(root, p, &b, level, slot, key);
2051 if (!p->skip_locking) {
2052 level = btrfs_header_level(b);
2053 if (level <= write_lock_level) {
2055 p->locks[level] = BTRFS_WRITE_LOCK;
2057 btrfs_tree_read_lock(b);
2058 p->locks[level] = BTRFS_READ_LOCK;
2060 p->nodes[level] = b;
2065 if (ret < 0 && !p->skip_release_on_error)
2066 btrfs_release_path(p);
2068 if (p->need_commit_sem) {
2071 ret2 = finish_need_commit_sem_search(p);
2072 up_read(&fs_info->commit_root_sem);
2079 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
2082 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2083 * current state of the tree together with the operations recorded in the tree
2084 * modification log to search for the key in a previous version of this tree, as
2085 * denoted by the time_seq parameter.
2087 * Naturally, there is no support for insert, delete or cow operations.
2089 * The resulting path and return value will be set up as if we called
2090 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2092 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2093 struct btrfs_path *p, u64 time_seq)
2095 struct btrfs_fs_info *fs_info = root->fs_info;
2096 struct extent_buffer *b;
2101 int lowest_unlock = 1;
2102 u8 lowest_level = 0;
2104 lowest_level = p->lowest_level;
2105 WARN_ON(p->nodes[0] != NULL);
2107 if (p->search_commit_root) {
2109 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2113 b = btrfs_get_old_root(root, time_seq);
2118 level = btrfs_header_level(b);
2119 p->locks[level] = BTRFS_READ_LOCK;
2124 level = btrfs_header_level(b);
2125 p->nodes[level] = b;
2128 * we have a lock on b and as long as we aren't changing
2129 * the tree, there is no way to for the items in b to change.
2130 * It is safe to drop the lock on our parent before we
2131 * go through the expensive btree search on b.
2133 btrfs_unlock_up_safe(p, level + 1);
2135 ret = btrfs_bin_search(b, key, &slot);
2140 p->slots[level] = slot;
2141 unlock_up(p, level, lowest_unlock, 0, NULL);
2145 if (ret && slot > 0) {
2149 p->slots[level] = slot;
2150 unlock_up(p, level, lowest_unlock, 0, NULL);
2152 if (level == lowest_level) {
2158 err = read_block_for_search(root, p, &b, level, slot, key);
2166 level = btrfs_header_level(b);
2167 btrfs_tree_read_lock(b);
2168 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2173 p->locks[level] = BTRFS_READ_LOCK;
2174 p->nodes[level] = b;
2179 btrfs_release_path(p);
2185 * helper to use instead of search slot if no exact match is needed but
2186 * instead the next or previous item should be returned.
2187 * When find_higher is true, the next higher item is returned, the next lower
2189 * When return_any and find_higher are both true, and no higher item is found,
2190 * return the next lower instead.
2191 * When return_any is true and find_higher is false, and no lower item is found,
2192 * return the next higher instead.
2193 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2196 int btrfs_search_slot_for_read(struct btrfs_root *root,
2197 const struct btrfs_key *key,
2198 struct btrfs_path *p, int find_higher,
2202 struct extent_buffer *leaf;
2205 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2209 * a return value of 1 means the path is at the position where the
2210 * item should be inserted. Normally this is the next bigger item,
2211 * but in case the previous item is the last in a leaf, path points
2212 * to the first free slot in the previous leaf, i.e. at an invalid
2218 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2219 ret = btrfs_next_leaf(root, p);
2225 * no higher item found, return the next
2230 btrfs_release_path(p);
2234 if (p->slots[0] == 0) {
2235 ret = btrfs_prev_leaf(root, p);
2240 if (p->slots[0] == btrfs_header_nritems(leaf))
2247 * no lower item found, return the next
2252 btrfs_release_path(p);
2262 * Execute search and call btrfs_previous_item to traverse backwards if the item
2265 * Return 0 if found, 1 if not found and < 0 if error.
2267 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
2268 struct btrfs_path *path)
2272 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
2274 ret = btrfs_previous_item(root, path, key->objectid, key->type);
2277 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2283 * adjust the pointers going up the tree, starting at level
2284 * making sure the right key of each node is points to 'key'.
2285 * This is used after shifting pointers to the left, so it stops
2286 * fixing up pointers when a given leaf/node is not in slot 0 of the
2290 static void fixup_low_keys(struct btrfs_path *path,
2291 struct btrfs_disk_key *key, int level)
2294 struct extent_buffer *t;
2297 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2298 int tslot = path->slots[i];
2300 if (!path->nodes[i])
2303 ret = btrfs_tree_mod_log_insert_key(t, tslot,
2304 BTRFS_MOD_LOG_KEY_REPLACE, GFP_ATOMIC);
2306 btrfs_set_node_key(t, key, tslot);
2307 btrfs_mark_buffer_dirty(path->nodes[i]);
2316 * This function isn't completely safe. It's the caller's responsibility
2317 * that the new key won't break the order
2319 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
2320 struct btrfs_path *path,
2321 const struct btrfs_key *new_key)
2323 struct btrfs_disk_key disk_key;
2324 struct extent_buffer *eb;
2327 eb = path->nodes[0];
2328 slot = path->slots[0];
2330 btrfs_item_key(eb, &disk_key, slot - 1);
2331 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2333 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2334 slot, btrfs_disk_key_objectid(&disk_key),
2335 btrfs_disk_key_type(&disk_key),
2336 btrfs_disk_key_offset(&disk_key),
2337 new_key->objectid, new_key->type,
2339 btrfs_print_leaf(eb);
2343 if (slot < btrfs_header_nritems(eb) - 1) {
2344 btrfs_item_key(eb, &disk_key, slot + 1);
2345 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2347 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2348 slot, btrfs_disk_key_objectid(&disk_key),
2349 btrfs_disk_key_type(&disk_key),
2350 btrfs_disk_key_offset(&disk_key),
2351 new_key->objectid, new_key->type,
2353 btrfs_print_leaf(eb);
2358 btrfs_cpu_key_to_disk(&disk_key, new_key);
2359 btrfs_set_item_key(eb, &disk_key, slot);
2360 btrfs_mark_buffer_dirty(eb);
2362 fixup_low_keys(path, &disk_key, 1);
2366 * Check key order of two sibling extent buffers.
2368 * Return true if something is wrong.
2369 * Return false if everything is fine.
2371 * Tree-checker only works inside one tree block, thus the following
2372 * corruption can not be detected by tree-checker:
2374 * Leaf @left | Leaf @right
2375 * --------------------------------------------------------------
2376 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2378 * Key f6 in leaf @left itself is valid, but not valid when the next
2379 * key in leaf @right is 7.
2380 * This can only be checked at tree block merge time.
2381 * And since tree checker has ensured all key order in each tree block
2382 * is correct, we only need to bother the last key of @left and the first
2385 static bool check_sibling_keys(struct extent_buffer *left,
2386 struct extent_buffer *right)
2388 struct btrfs_key left_last;
2389 struct btrfs_key right_first;
2390 int level = btrfs_header_level(left);
2391 int nr_left = btrfs_header_nritems(left);
2392 int nr_right = btrfs_header_nritems(right);
2394 /* No key to check in one of the tree blocks */
2395 if (!nr_left || !nr_right)
2399 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2400 btrfs_node_key_to_cpu(right, &right_first, 0);
2402 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2403 btrfs_item_key_to_cpu(right, &right_first, 0);
2406 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2407 btrfs_crit(left->fs_info,
2408 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2409 left_last.objectid, left_last.type,
2410 left_last.offset, right_first.objectid,
2411 right_first.type, right_first.offset);
2418 * try to push data from one node into the next node left in the
2421 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2422 * error, and > 0 if there was no room in the left hand block.
2424 static int push_node_left(struct btrfs_trans_handle *trans,
2425 struct extent_buffer *dst,
2426 struct extent_buffer *src, int empty)
2428 struct btrfs_fs_info *fs_info = trans->fs_info;
2434 src_nritems = btrfs_header_nritems(src);
2435 dst_nritems = btrfs_header_nritems(dst);
2436 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2437 WARN_ON(btrfs_header_generation(src) != trans->transid);
2438 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2440 if (!empty && src_nritems <= 8)
2443 if (push_items <= 0)
2447 push_items = min(src_nritems, push_items);
2448 if (push_items < src_nritems) {
2449 /* leave at least 8 pointers in the node if
2450 * we aren't going to empty it
2452 if (src_nritems - push_items < 8) {
2453 if (push_items <= 8)
2459 push_items = min(src_nritems - 8, push_items);
2461 /* dst is the left eb, src is the middle eb */
2462 if (check_sibling_keys(dst, src)) {
2464 btrfs_abort_transaction(trans, ret);
2467 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2469 btrfs_abort_transaction(trans, ret);
2472 copy_extent_buffer(dst, src,
2473 btrfs_node_key_ptr_offset(dst_nritems),
2474 btrfs_node_key_ptr_offset(0),
2475 push_items * sizeof(struct btrfs_key_ptr));
2477 if (push_items < src_nritems) {
2479 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2480 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2482 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2483 btrfs_node_key_ptr_offset(push_items),
2484 (src_nritems - push_items) *
2485 sizeof(struct btrfs_key_ptr));
2487 btrfs_set_header_nritems(src, src_nritems - push_items);
2488 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2489 btrfs_mark_buffer_dirty(src);
2490 btrfs_mark_buffer_dirty(dst);
2496 * try to push data from one node into the next node right in the tree.
2499 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2500 * error, and > 0 if there was no room in the right hand block.
2502 * this will only push up to 1/2 the contents of the left node over
2504 static int balance_node_right(struct btrfs_trans_handle *trans,
2505 struct extent_buffer *dst,
2506 struct extent_buffer *src)
2508 struct btrfs_fs_info *fs_info = trans->fs_info;
2515 WARN_ON(btrfs_header_generation(src) != trans->transid);
2516 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2518 src_nritems = btrfs_header_nritems(src);
2519 dst_nritems = btrfs_header_nritems(dst);
2520 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2521 if (push_items <= 0)
2524 if (src_nritems < 4)
2527 max_push = src_nritems / 2 + 1;
2528 /* don't try to empty the node */
2529 if (max_push >= src_nritems)
2532 if (max_push < push_items)
2533 push_items = max_push;
2535 /* dst is the right eb, src is the middle eb */
2536 if (check_sibling_keys(src, dst)) {
2538 btrfs_abort_transaction(trans, ret);
2541 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2543 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2544 btrfs_node_key_ptr_offset(0),
2546 sizeof(struct btrfs_key_ptr));
2548 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2551 btrfs_abort_transaction(trans, ret);
2554 copy_extent_buffer(dst, src,
2555 btrfs_node_key_ptr_offset(0),
2556 btrfs_node_key_ptr_offset(src_nritems - push_items),
2557 push_items * sizeof(struct btrfs_key_ptr));
2559 btrfs_set_header_nritems(src, src_nritems - push_items);
2560 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2562 btrfs_mark_buffer_dirty(src);
2563 btrfs_mark_buffer_dirty(dst);
2569 * helper function to insert a new root level in the tree.
2570 * A new node is allocated, and a single item is inserted to
2571 * point to the existing root
2573 * returns zero on success or < 0 on failure.
2575 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2576 struct btrfs_root *root,
2577 struct btrfs_path *path, int level)
2579 struct btrfs_fs_info *fs_info = root->fs_info;
2581 struct extent_buffer *lower;
2582 struct extent_buffer *c;
2583 struct extent_buffer *old;
2584 struct btrfs_disk_key lower_key;
2587 BUG_ON(path->nodes[level]);
2588 BUG_ON(path->nodes[level-1] != root->node);
2590 lower = path->nodes[level-1];
2592 btrfs_item_key(lower, &lower_key, 0);
2594 btrfs_node_key(lower, &lower_key, 0);
2596 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2597 &lower_key, level, root->node->start, 0,
2598 BTRFS_NESTING_NEW_ROOT);
2602 root_add_used(root, fs_info->nodesize);
2604 btrfs_set_header_nritems(c, 1);
2605 btrfs_set_node_key(c, &lower_key, 0);
2606 btrfs_set_node_blockptr(c, 0, lower->start);
2607 lower_gen = btrfs_header_generation(lower);
2608 WARN_ON(lower_gen != trans->transid);
2610 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2612 btrfs_mark_buffer_dirty(c);
2615 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2617 rcu_assign_pointer(root->node, c);
2619 /* the super has an extra ref to root->node */
2620 free_extent_buffer(old);
2622 add_root_to_dirty_list(root);
2623 atomic_inc(&c->refs);
2624 path->nodes[level] = c;
2625 path->locks[level] = BTRFS_WRITE_LOCK;
2626 path->slots[level] = 0;
2631 * worker function to insert a single pointer in a node.
2632 * the node should have enough room for the pointer already
2634 * slot and level indicate where you want the key to go, and
2635 * blocknr is the block the key points to.
2637 static void insert_ptr(struct btrfs_trans_handle *trans,
2638 struct btrfs_path *path,
2639 struct btrfs_disk_key *key, u64 bytenr,
2640 int slot, int level)
2642 struct extent_buffer *lower;
2646 BUG_ON(!path->nodes[level]);
2647 btrfs_assert_tree_write_locked(path->nodes[level]);
2648 lower = path->nodes[level];
2649 nritems = btrfs_header_nritems(lower);
2650 BUG_ON(slot > nritems);
2651 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2652 if (slot != nritems) {
2654 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2655 slot, nritems - slot);
2658 memmove_extent_buffer(lower,
2659 btrfs_node_key_ptr_offset(slot + 1),
2660 btrfs_node_key_ptr_offset(slot),
2661 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2664 ret = btrfs_tree_mod_log_insert_key(lower, slot,
2665 BTRFS_MOD_LOG_KEY_ADD, GFP_NOFS);
2668 btrfs_set_node_key(lower, key, slot);
2669 btrfs_set_node_blockptr(lower, slot, bytenr);
2670 WARN_ON(trans->transid == 0);
2671 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2672 btrfs_set_header_nritems(lower, nritems + 1);
2673 btrfs_mark_buffer_dirty(lower);
2677 * split the node at the specified level in path in two.
2678 * The path is corrected to point to the appropriate node after the split
2680 * Before splitting this tries to make some room in the node by pushing
2681 * left and right, if either one works, it returns right away.
2683 * returns 0 on success and < 0 on failure
2685 static noinline int split_node(struct btrfs_trans_handle *trans,
2686 struct btrfs_root *root,
2687 struct btrfs_path *path, int level)
2689 struct btrfs_fs_info *fs_info = root->fs_info;
2690 struct extent_buffer *c;
2691 struct extent_buffer *split;
2692 struct btrfs_disk_key disk_key;
2697 c = path->nodes[level];
2698 WARN_ON(btrfs_header_generation(c) != trans->transid);
2699 if (c == root->node) {
2701 * trying to split the root, let's make a new one
2703 * tree mod log: We don't log the removal of the old root in
2704 * insert_new_root, because that root buffer will be kept as a
2705 * normal node. We are going to log removal of half of the
2706 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2707 * holding a tree lock on the buffer, which is why we cannot
2708 * race with other tree_mod_log users.
2710 ret = insert_new_root(trans, root, path, level + 1);
2714 ret = push_nodes_for_insert(trans, root, path, level);
2715 c = path->nodes[level];
2716 if (!ret && btrfs_header_nritems(c) <
2717 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2723 c_nritems = btrfs_header_nritems(c);
2724 mid = (c_nritems + 1) / 2;
2725 btrfs_node_key(c, &disk_key, mid);
2727 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2728 &disk_key, level, c->start, 0,
2729 BTRFS_NESTING_SPLIT);
2731 return PTR_ERR(split);
2733 root_add_used(root, fs_info->nodesize);
2734 ASSERT(btrfs_header_level(c) == level);
2736 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2738 btrfs_abort_transaction(trans, ret);
2741 copy_extent_buffer(split, c,
2742 btrfs_node_key_ptr_offset(0),
2743 btrfs_node_key_ptr_offset(mid),
2744 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2745 btrfs_set_header_nritems(split, c_nritems - mid);
2746 btrfs_set_header_nritems(c, mid);
2748 btrfs_mark_buffer_dirty(c);
2749 btrfs_mark_buffer_dirty(split);
2751 insert_ptr(trans, path, &disk_key, split->start,
2752 path->slots[level + 1] + 1, level + 1);
2754 if (path->slots[level] >= mid) {
2755 path->slots[level] -= mid;
2756 btrfs_tree_unlock(c);
2757 free_extent_buffer(c);
2758 path->nodes[level] = split;
2759 path->slots[level + 1] += 1;
2761 btrfs_tree_unlock(split);
2762 free_extent_buffer(split);
2768 * how many bytes are required to store the items in a leaf. start
2769 * and nr indicate which items in the leaf to check. This totals up the
2770 * space used both by the item structs and the item data
2772 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2775 int nritems = btrfs_header_nritems(l);
2776 int end = min(nritems, start + nr) - 1;
2780 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
2781 data_len = data_len - btrfs_item_offset(l, end);
2782 data_len += sizeof(struct btrfs_item) * nr;
2783 WARN_ON(data_len < 0);
2788 * The space between the end of the leaf items and
2789 * the start of the leaf data. IOW, how much room
2790 * the leaf has left for both items and data
2792 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
2794 struct btrfs_fs_info *fs_info = leaf->fs_info;
2795 int nritems = btrfs_header_nritems(leaf);
2798 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
2801 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
2803 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
2804 leaf_space_used(leaf, 0, nritems), nritems);
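/*
 * Illustrative sketch, not part of the original source: how the two helpers
 * above relate.  Items plus free space always account for the whole data
 * area of the block; the helper name is an assumption for this example.
 */
static inline void example_leaf_space_invariant(struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int used = leaf_space_used(leaf, 0, nritems);
	int free_space = btrfs_leaf_free_space(leaf);

	/* used + free == BTRFS_LEAF_DATA_SIZE() for a consistent leaf */
	WARN_ON(used + free_space != BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
}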
2810 * min slot controls the lowest index we're willing to push to the
2811 * right. We'll push up to and including min_slot, but no lower
2813 static noinline int __push_leaf_right(struct btrfs_path *path,
2814 int data_size, int empty,
2815 struct extent_buffer *right,
2816 int free_space, u32 left_nritems,
2819 struct btrfs_fs_info *fs_info = right->fs_info;
2820 struct extent_buffer *left = path->nodes[0];
2821 struct extent_buffer *upper = path->nodes[1];
2822 struct btrfs_map_token token;
2823 struct btrfs_disk_key disk_key;
2836 nr = max_t(u32, 1, min_slot);
2838 if (path->slots[0] >= left_nritems)
2839 push_space += data_size;
2841 slot = path->slots[1];
2842 i = left_nritems - 1;
2844 if (!empty && push_items > 0) {
2845 if (path->slots[0] > i)
2847 if (path->slots[0] == i) {
2848 int space = btrfs_leaf_free_space(left);
2850 if (space + push_space * 2 > free_space)
2855 if (path->slots[0] == i)
2856 push_space += data_size;
2858 this_item_size = btrfs_item_size(left, i);
2859 if (this_item_size + sizeof(struct btrfs_item) +
2860 push_space > free_space)
2864 push_space += this_item_size + sizeof(struct btrfs_item);
2870 if (push_items == 0)
2873 WARN_ON(!empty && push_items == left_nritems);
2875 /* push left to right */
2876 right_nritems = btrfs_header_nritems(right);
2878 push_space = btrfs_item_data_end(left, left_nritems - push_items);
2879 push_space -= leaf_data_end(left);
2881 /* make room in the right data area */
2882 data_end = leaf_data_end(right);
2883 memmove_extent_buffer(right,
2884 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
2885 BTRFS_LEAF_DATA_OFFSET + data_end,
2886 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
2888 /* copy from the left data area */
2889 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
2890 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
2891 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
2894 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2895 btrfs_item_nr_offset(0),
2896 right_nritems * sizeof(struct btrfs_item));
2898 /* copy the items from left to right */
2899 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2900 btrfs_item_nr_offset(left_nritems - push_items),
2901 push_items * sizeof(struct btrfs_item));
2903 /* update the item pointers */
2904 btrfs_init_map_token(&token, right);
2905 right_nritems += push_items;
2906 btrfs_set_header_nritems(right, right_nritems);
2907 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
2908 for (i = 0; i < right_nritems; i++) {
2909 push_space -= btrfs_token_item_size(&token, i);
2910 btrfs_set_token_item_offset(&token, i, push_space);
2913 left_nritems -= push_items;
2914 btrfs_set_header_nritems(left, left_nritems);
2917 btrfs_mark_buffer_dirty(left);
2919 btrfs_clean_tree_block(left);
2921 btrfs_mark_buffer_dirty(right);
2923 btrfs_item_key(right, &disk_key, 0);
2924 btrfs_set_node_key(upper, &disk_key, slot + 1);
2925 btrfs_mark_buffer_dirty(upper);
2927 /* then fixup the leaf pointer in the path */
2928 if (path->slots[0] >= left_nritems) {
2929 path->slots[0] -= left_nritems;
2930 if (btrfs_header_nritems(path->nodes[0]) == 0)
2931 btrfs_clean_tree_block(path->nodes[0]);
2932 btrfs_tree_unlock(path->nodes[0]);
2933 free_extent_buffer(path->nodes[0]);
2934 path->nodes[0] = right;
2935 path->slots[1] += 1;
2937 btrfs_tree_unlock(right);
2938 free_extent_buffer(right);
2943 btrfs_tree_unlock(right);
2944 free_extent_buffer(right);
2949 * push some data in the path leaf to the right, trying to free up at
2950 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2952 * returns 1 if the push failed because the other node didn't have enough
2953 * room, 0 if everything worked out and < 0 if there were major errors.
2955 * this will push starting from min_slot to the end of the leaf. It won't
2956 * push any slot lower than min_slot
2958 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2959 *root, struct btrfs_path *path,
2960 int min_data_size, int data_size,
2961 int empty, u32 min_slot)
2963 struct extent_buffer *left = path->nodes[0];
2964 struct extent_buffer *right;
2965 struct extent_buffer *upper;
2971 if (!path->nodes[1])
2974 slot = path->slots[1];
2975 upper = path->nodes[1];
2976 if (slot >= btrfs_header_nritems(upper) - 1)
2979 btrfs_assert_tree_write_locked(path->nodes[1]);
2981 right = btrfs_read_node_slot(upper, slot + 1);
2983 * slot + 1 is not valid or we fail to read the right node,
2984 * no big deal, just return.
2989 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
2991 free_space = btrfs_leaf_free_space(right);
2992 if (free_space < data_size)
2995 ret = btrfs_cow_block(trans, root, right, upper,
2996 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
3000 left_nritems = btrfs_header_nritems(left);
3001 if (left_nritems == 0)
3004 if (check_sibling_keys(left, right)) {
3006 btrfs_tree_unlock(right);
3007 free_extent_buffer(right);
3010 if (path->slots[0] == left_nritems && !empty) {
3011 /* Key greater than all keys in the leaf, right neighbor has
3012 * enough room for it and we're not emptying our leaf to delete
3013 * it, therefore use right neighbor to insert the new item and
3014 * no need to touch/dirty our left leaf. */
3015 btrfs_tree_unlock(left);
3016 free_extent_buffer(left);
3017 path->nodes[0] = right;
3023 return __push_leaf_right(path, min_data_size, empty,
3024 right, free_space, left_nritems, min_slot);
3026 btrfs_tree_unlock(right);
3027 free_extent_buffer(right);
3032 * push some data in the path leaf to the left, trying to free up at
3033 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3035 * max_slot can put a limit on how far into the leaf we'll push items. The
3036 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the items.
3039 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3040 int empty, struct extent_buffer *left,
3041 int free_space, u32 right_nritems,
3044 struct btrfs_fs_info *fs_info = left->fs_info;
3045 struct btrfs_disk_key disk_key;
3046 struct extent_buffer *right = path->nodes[0];
3050 u32 old_left_nritems;
3054 u32 old_left_item_size;
3055 struct btrfs_map_token token;
3058 nr = min(right_nritems, max_slot);
3060 nr = min(right_nritems - 1, max_slot);
3062 for (i = 0; i < nr; i++) {
3063 if (!empty && push_items > 0) {
3064 if (path->slots[0] < i)
3066 if (path->slots[0] == i) {
3067 int space = btrfs_leaf_free_space(right);
3069 if (space + push_space * 2 > free_space)
3074 if (path->slots[0] == i)
3075 push_space += data_size;
3077 this_item_size = btrfs_item_size(right, i);
3078 if (this_item_size + sizeof(struct btrfs_item) + push_space >
3083 push_space += this_item_size + sizeof(struct btrfs_item);
3086 if (push_items == 0) {
3090 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3092 /* push data from right to left */
3093 copy_extent_buffer(left, right,
3094 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3095 btrfs_item_nr_offset(0),
3096 push_items * sizeof(struct btrfs_item));
3098 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3099 btrfs_item_offset(right, push_items - 1);
3101 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3102 leaf_data_end(left) - push_space,
3103 BTRFS_LEAF_DATA_OFFSET +
3104 btrfs_item_offset(right, push_items - 1),
3106 old_left_nritems = btrfs_header_nritems(left);
3107 BUG_ON(old_left_nritems <= 0);
3109 btrfs_init_map_token(&token, left);
3110 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
3111 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3114 ioff = btrfs_token_item_offset(&token, i);
3115 btrfs_set_token_item_offset(&token, i,
3116 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3118 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3120 /* fixup right node */
3121 if (push_items > right_nritems)
3122 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3125 if (push_items < right_nritems) {
3126 push_space = btrfs_item_offset(right, push_items - 1) -
3127 leaf_data_end(right);
3128 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3129 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3130 BTRFS_LEAF_DATA_OFFSET +
3131 leaf_data_end(right), push_space);
3133 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3134 btrfs_item_nr_offset(push_items),
3135 (btrfs_header_nritems(right) - push_items) *
3136 sizeof(struct btrfs_item));
3139 btrfs_init_map_token(&token, right);
3140 right_nritems -= push_items;
3141 btrfs_set_header_nritems(right, right_nritems);
3142 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3143 for (i = 0; i < right_nritems; i++) {
3144 push_space = push_space - btrfs_token_item_size(&token, i);
3145 btrfs_set_token_item_offset(&token, i, push_space);
3148 btrfs_mark_buffer_dirty(left);
3150 btrfs_mark_buffer_dirty(right);
3152 btrfs_clean_tree_block(right);
3154 btrfs_item_key(right, &disk_key, 0);
3155 fixup_low_keys(path, &disk_key, 1);
3157 /* then fixup the leaf pointer in the path */
3158 if (path->slots[0] < push_items) {
3159 path->slots[0] += old_left_nritems;
3160 btrfs_tree_unlock(path->nodes[0]);
3161 free_extent_buffer(path->nodes[0]);
3162 path->nodes[0] = left;
3163 path->slots[1] -= 1;
3165 btrfs_tree_unlock(left);
3166 free_extent_buffer(left);
3167 path->slots[0] -= push_items;
3169 BUG_ON(path->slots[0] < 0);
3172 btrfs_tree_unlock(left);
3173 free_extent_buffer(left);
3178 * push some data in the path leaf to the left, trying to free up at
3179 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3181 * max_slot can put a limit on how far into the leaf we'll push items. The
3182 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3185 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3186 *root, struct btrfs_path *path, int min_data_size,
3187 int data_size, int empty, u32 max_slot)
3189 struct extent_buffer *right = path->nodes[0];
3190 struct extent_buffer *left;
3196 slot = path->slots[1];
3199 if (!path->nodes[1])
3202 right_nritems = btrfs_header_nritems(right);
3203 if (right_nritems == 0)
3206 btrfs_assert_tree_write_locked(path->nodes[1]);
3208 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3210 * slot - 1 is not valid or we fail to read the left node,
3211 * no big deal, just return.
3216 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3218 free_space = btrfs_leaf_free_space(left);
3219 if (free_space < data_size) {
3224 ret = btrfs_cow_block(trans, root, left,
3225 path->nodes[1], slot - 1, &left,
3226 BTRFS_NESTING_LEFT_COW);
3228 /* we hit -ENOSPC, but it isn't fatal here */
3234 if (check_sibling_keys(left, right)) {
3238 return __push_leaf_left(path, min_data_size,
3239 empty, left, free_space, right_nritems,
3242 btrfs_tree_unlock(left);
3243 free_extent_buffer(left);
3248 * split the path's leaf in two, making sure there is at least data_size
3249 * available for the resulting leaf level of the path.
3251 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3252 struct btrfs_path *path,
3253 struct extent_buffer *l,
3254 struct extent_buffer *right,
3255 int slot, int mid, int nritems)
3257 struct btrfs_fs_info *fs_info = trans->fs_info;
3261 struct btrfs_disk_key disk_key;
3262 struct btrfs_map_token token;
3264 nritems = nritems - mid;
3265 btrfs_set_header_nritems(right, nritems);
3266 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
3268 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3269 btrfs_item_nr_offset(mid),
3270 nritems * sizeof(struct btrfs_item));
3272 copy_extent_buffer(right, l,
3273 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
3274 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
3275 leaf_data_end(l), data_copy_size);
3277 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
3279 btrfs_init_map_token(&token, right);
3280 for (i = 0; i < nritems; i++) {
3283 ioff = btrfs_token_item_offset(&token, i);
3284 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
3287 btrfs_set_header_nritems(l, mid);
3288 btrfs_item_key(right, &disk_key, 0);
3289 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3291 btrfs_mark_buffer_dirty(right);
3292 btrfs_mark_buffer_dirty(l);
3293 BUG_ON(path->slots[0] != slot);
3296 btrfs_tree_unlock(path->nodes[0]);
3297 free_extent_buffer(path->nodes[0]);
3298 path->nodes[0] = right;
3299 path->slots[0] -= mid;
3300 path->slots[1] += 1;
3302 btrfs_tree_unlock(right);
3303 free_extent_buffer(right);
3306 BUG_ON(path->slots[0] < 0);
3310 * double splits happen when we need to insert a big item in the middle
3311 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3312 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3315 * We avoid this by trying to push the items on either side of our target
3316 * into the adjacent leaves. If all goes well we can avoid the double split completely.
3319 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3320 struct btrfs_root *root,
3321 struct btrfs_path *path,
3328 int space_needed = data_size;
3330 slot = path->slots[0];
3331 if (slot < btrfs_header_nritems(path->nodes[0]))
3332 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3335 * try to push all the items after our slot into the
3338 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3345 nritems = btrfs_header_nritems(path->nodes[0]);
3347 * our goal is to get our slot at the start or end of a leaf. If
3348 * we've done so we're done
3350 if (path->slots[0] == 0 || path->slots[0] == nritems)
3353 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3356 /* try to push all the items before our slot into the next leaf */
3357 slot = path->slots[0];
3358 space_needed = data_size;
3360 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3361 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3374 * split the path's leaf in two, making sure there is at least data_size
3375 * available for the resulting leaf level of the path.
3377 * returns 0 if all went well and < 0 on failure.
3379 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3380 struct btrfs_root *root,
3381 const struct btrfs_key *ins_key,
3382 struct btrfs_path *path, int data_size,
3385 struct btrfs_disk_key disk_key;
3386 struct extent_buffer *l;
3390 struct extent_buffer *right;
3391 struct btrfs_fs_info *fs_info = root->fs_info;
3395 int num_doubles = 0;
3396 int tried_avoid_double = 0;
3399 slot = path->slots[0];
3400 if (extend && data_size + btrfs_item_size(l, slot) +
3401 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3404 /* first try to make some room by pushing left and right */
3405 if (data_size && path->nodes[1]) {
3406 int space_needed = data_size;
3408 if (slot < btrfs_header_nritems(l))
3409 space_needed -= btrfs_leaf_free_space(l);
3411 wret = push_leaf_right(trans, root, path, space_needed,
3412 space_needed, 0, 0);
3416 space_needed = data_size;
3418 space_needed -= btrfs_leaf_free_space(l);
3419 wret = push_leaf_left(trans, root, path, space_needed,
3420 space_needed, 0, (u32)-1);
3426 /* did the pushes work? */
3427 if (btrfs_leaf_free_space(l) >= data_size)
3431 if (!path->nodes[1]) {
3432 ret = insert_new_root(trans, root, path, 1);
3439 slot = path->slots[0];
3440 nritems = btrfs_header_nritems(l);
3441 mid = (nritems + 1) / 2;
3445 leaf_space_used(l, mid, nritems - mid) + data_size >
3446 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3447 if (slot >= nritems) {
3451 if (mid != nritems &&
3452 leaf_space_used(l, mid, nritems - mid) +
3453 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3454 if (data_size && !tried_avoid_double)
3455 goto push_for_double;
3461 if (leaf_space_used(l, 0, mid) + data_size >
3462 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3463 if (!extend && data_size && slot == 0) {
3465 } else if ((extend || !data_size) && slot == 0) {
3469 if (mid != nritems &&
3470 leaf_space_used(l, mid, nritems - mid) +
3471 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3472 if (data_size && !tried_avoid_double)
3473 goto push_for_double;
3481 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3483 btrfs_item_key(l, &disk_key, mid);
3486 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3487 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3488 * subclasses, which is 8 at the time of this patch, and we've maxed it
3489 * out. In the future we could add a
3490 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3491 * use BTRFS_NESTING_NEW_ROOT.
3493 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3494 &disk_key, 0, l->start, 0,
3495 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3496 BTRFS_NESTING_SPLIT);
3498 return PTR_ERR(right);
3500 root_add_used(root, fs_info->nodesize);
3504 btrfs_set_header_nritems(right, 0);
3505 insert_ptr(trans, path, &disk_key,
3506 right->start, path->slots[1] + 1, 1);
3507 btrfs_tree_unlock(path->nodes[0]);
3508 free_extent_buffer(path->nodes[0]);
3509 path->nodes[0] = right;
3511 path->slots[1] += 1;
3513 btrfs_set_header_nritems(right, 0);
3514 insert_ptr(trans, path, &disk_key,
3515 right->start, path->slots[1], 1);
3516 btrfs_tree_unlock(path->nodes[0]);
3517 free_extent_buffer(path->nodes[0]);
3518 path->nodes[0] = right;
3520 if (path->slots[1] == 0)
3521 fixup_low_keys(path, &disk_key, 1);
3524 * We create a new leaf 'right' for the required ins_len and
3525 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3526 * the relevant items from the original leaf into 'right'.
3531 copy_for_split(trans, path, l, right, slot, mid, nritems);
3534 BUG_ON(num_doubles != 0);
3542 push_for_double_split(trans, root, path, data_size);
3543 tried_avoid_double = 1;
3544 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3549 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3550 struct btrfs_root *root,
3551 struct btrfs_path *path, int ins_len)
3553 struct btrfs_key key;
3554 struct extent_buffer *leaf;
3555 struct btrfs_file_extent_item *fi;
3560 leaf = path->nodes[0];
3561 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3563 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3564 key.type != BTRFS_EXTENT_CSUM_KEY);
3566 if (btrfs_leaf_free_space(leaf) >= ins_len)
3569 item_size = btrfs_item_size(leaf, path->slots[0]);
3570 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3571 fi = btrfs_item_ptr(leaf, path->slots[0],
3572 struct btrfs_file_extent_item);
3573 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3575 btrfs_release_path(path);
3577 path->keep_locks = 1;
3578 path->search_for_split = 1;
3579 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3580 path->search_for_split = 0;
3587 leaf = path->nodes[0];
3588 /* if our item isn't there, return now */
3589 if (item_size != btrfs_item_size(leaf, path->slots[0]))
3592 /* the leaf has changed, it now has room. return now */
3593 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3596 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3597 fi = btrfs_item_ptr(leaf, path->slots[0],
3598 struct btrfs_file_extent_item);
3599 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3603 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3607 path->keep_locks = 0;
3608 btrfs_unlock_up_safe(path, 1);
3611 path->keep_locks = 0;
3615 static noinline int split_item(struct btrfs_path *path,
3616 const struct btrfs_key *new_key,
3617 unsigned long split_offset)
3619 struct extent_buffer *leaf;
3620 int orig_slot, slot;
3625 struct btrfs_disk_key disk_key;
3627 leaf = path->nodes[0];
3628 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3630 orig_slot = path->slots[0];
3631 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
3632 item_size = btrfs_item_size(leaf, path->slots[0]);
3634 buf = kmalloc(item_size, GFP_NOFS);
3638 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3639 path->slots[0]), item_size);
3641 slot = path->slots[0] + 1;
3642 nritems = btrfs_header_nritems(leaf);
3643 if (slot != nritems) {
3644 /* shift the items */
3645 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3646 btrfs_item_nr_offset(slot),
3647 (nritems - slot) * sizeof(struct btrfs_item));
3650 btrfs_cpu_key_to_disk(&disk_key, new_key);
3651 btrfs_set_item_key(leaf, &disk_key, slot);
3653 btrfs_set_item_offset(leaf, slot, orig_offset);
3654 btrfs_set_item_size(leaf, slot, item_size - split_offset);
3656 btrfs_set_item_offset(leaf, orig_slot,
3657 orig_offset + item_size - split_offset);
3658 btrfs_set_item_size(leaf, orig_slot, split_offset);
3660 btrfs_set_header_nritems(leaf, nritems + 1);
3662 /* write the data for the start of the original item */
3663 write_extent_buffer(leaf, buf,
3664 btrfs_item_ptr_offset(leaf, path->slots[0]),
3667 /* write the data for the new item */
3668 write_extent_buffer(leaf, buf + split_offset,
3669 btrfs_item_ptr_offset(leaf, slot),
3670 item_size - split_offset);
3671 btrfs_mark_buffer_dirty(leaf);
3673 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3679 * This function splits a single item into two items,
3680 * giving 'new_key' to the new item and splitting the
3681 * old one at split_offset (from the start of the item).
3683 * The path may be released by this operation. After
3684 * the split, the path is pointing to the old item. The
3685 * new item is going to be in the same node as the old one.
3687 * Note, the item being split must be small enough to live alone on
3688 * a tree block with room for one extra struct btrfs_item
3690 * This allows us to split the item in place, keeping a lock on the
3691 * leaf the entire time.
3693 int btrfs_split_item(struct btrfs_trans_handle *trans,
3694 struct btrfs_root *root,
3695 struct btrfs_path *path,
3696 const struct btrfs_key *new_key,
3697 unsigned long split_offset)
3700 ret = setup_leaf_for_split(trans, root, path,
3701 sizeof(struct btrfs_item));
3705 ret = split_item(path, new_key, split_offset);
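/*
 * Illustrative sketch, not part of the original source: splitting the item
 * the path points at into two items at byte 'split' of its data.  Reusing
 * the old key with a bumped offset is only an example policy; real callers
 * pick whatever key the new second half needs.
 */
static inline int example_split_item_in_place(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_path *path,
					      unsigned long split)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset += split;

	/* the first 'split' bytes keep the old key, the rest get new_key */
	return btrfs_split_item(trans, root, path, &new_key, split);
}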
3710 * make the item pointed to by the path smaller. new_size indicates
3711 * how small to make it, and from_end tells us if we just chop bytes
3712 * off the end of the item or if we shift the item to chop bytes off the front.
3715 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3718 struct extent_buffer *leaf;
3720 unsigned int data_end;
3721 unsigned int old_data_start;
3722 unsigned int old_size;
3723 unsigned int size_diff;
3725 struct btrfs_map_token token;
3727 leaf = path->nodes[0];
3728 slot = path->slots[0];
3730 old_size = btrfs_item_size(leaf, slot);
3731 if (old_size == new_size)
3734 nritems = btrfs_header_nritems(leaf);
3735 data_end = leaf_data_end(leaf);
3737 old_data_start = btrfs_item_offset(leaf, slot);
3739 size_diff = old_size - new_size;
3742 BUG_ON(slot >= nritems);
3745 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3747 /* first correct the data pointers */
3748 btrfs_init_map_token(&token, leaf);
3749 for (i = slot; i < nritems; i++) {
3752 ioff = btrfs_token_item_offset(&token, i);
3753 btrfs_set_token_item_offset(&token, i, ioff + size_diff);
3756 /* shift the data */
3758 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3759 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3760 data_end, old_data_start + new_size - data_end);
3762 struct btrfs_disk_key disk_key;
3765 btrfs_item_key(leaf, &disk_key, slot);
3767 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3769 struct btrfs_file_extent_item *fi;
3771 fi = btrfs_item_ptr(leaf, slot,
3772 struct btrfs_file_extent_item);
3773 fi = (struct btrfs_file_extent_item *)(
3774 (unsigned long)fi - size_diff);
3776 if (btrfs_file_extent_type(leaf, fi) ==
3777 BTRFS_FILE_EXTENT_INLINE) {
3778 ptr = btrfs_item_ptr_offset(leaf, slot);
3779 memmove_extent_buffer(leaf, ptr,
3781 BTRFS_FILE_EXTENT_INLINE_DATA_START);
3785 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3786 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
3787 data_end, old_data_start - data_end);
3789 offset = btrfs_disk_key_offset(&disk_key);
3790 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3791 btrfs_set_item_key(leaf, &disk_key, slot);
3793 fixup_low_keys(path, &disk_key, 1);
3796 btrfs_set_item_size(leaf, slot, new_size);
3797 btrfs_mark_buffer_dirty(leaf);
3799 if (btrfs_leaf_free_space(leaf) < 0) {
3800 btrfs_print_leaf(leaf);
3806 * make the item pointed to by the path bigger, data_size is the added size.
3808 void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
3811 struct extent_buffer *leaf;
3813 unsigned int data_end;
3814 unsigned int old_data;
3815 unsigned int old_size;
3817 struct btrfs_map_token token;
3819 leaf = path->nodes[0];
3821 nritems = btrfs_header_nritems(leaf);
3822 data_end = leaf_data_end(leaf);
3824 if (btrfs_leaf_free_space(leaf) < data_size) {
3825 btrfs_print_leaf(leaf);
3828 slot = path->slots[0];
3829 old_data = btrfs_item_data_end(leaf, slot);
3832 if (slot >= nritems) {
3833 btrfs_print_leaf(leaf);
3834 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
3840 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3842 /* first correct the data pointers */
3843 btrfs_init_map_token(&token, leaf);
3844 for (i = slot; i < nritems; i++) {
3847 ioff = btrfs_token_item_offset(&token, i);
3848 btrfs_set_token_item_offset(&token, i, ioff - data_size);
3851 /* shift the data */
3852 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3853 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
3854 data_end, old_data - data_end);
3856 data_end = old_data;
3857 old_size = btrfs_item_size(leaf, slot);
3858 btrfs_set_item_size(leaf, slot, old_size + data_size);
3859 btrfs_mark_buffer_dirty(leaf);
3861 if (btrfs_leaf_free_space(leaf) < 0) {
3862 btrfs_print_leaf(leaf);
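/*
 * Illustrative sketch, not part of the original source: resizing the item at
 * path->slots[0] in place with the two helpers above.  The leaf must be
 * locked, and growing assumes the leaf has enough free space.
 */
static inline void example_resize_item(struct btrfs_path *path, u32 new_size)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size(leaf, path->slots[0]);

	if (new_size < old_size)
		/* chop bytes off the end of the item */
		btrfs_truncate_item(path, new_size, 1);
	else if (new_size > old_size)
		/* add room at the end of the item */
		btrfs_extend_item(path, new_size - old_size);
}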
3868 * setup_items_for_insert - Helper called before inserting one or more items
3869 * to a leaf. Main purpose is to save stack depth by doing the bulk of the work
3870 * in a function that doesn't call btrfs_search_slot
3872 * @root: root we are inserting items to
3873 * @path: points to the leaf/slot where we are going to insert new items
3874 * @batch: information about the batch of items to insert
3876 static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
3877 const struct btrfs_item_batch *batch)
3879 struct btrfs_fs_info *fs_info = root->fs_info;
3882 unsigned int data_end;
3883 struct btrfs_disk_key disk_key;
3884 struct extent_buffer *leaf;
3886 struct btrfs_map_token token;
3890 * Before anything else, update keys in the parent and other ancestors
3891 * if needed, then release the write locks on them, so that other tasks
3892 * can use them while we modify the leaf.
3894 if (path->slots[0] == 0) {
3895 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
3896 fixup_low_keys(path, &disk_key, 1);
3898 btrfs_unlock_up_safe(path, 1);
3900 leaf = path->nodes[0];
3901 slot = path->slots[0];
3903 nritems = btrfs_header_nritems(leaf);
3904 data_end = leaf_data_end(leaf);
3905 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
3907 if (btrfs_leaf_free_space(leaf) < total_size) {
3908 btrfs_print_leaf(leaf);
3909 btrfs_crit(fs_info, "not enough freespace need %u have %d",
3910 total_size, btrfs_leaf_free_space(leaf));
3914 btrfs_init_map_token(&token, leaf);
3915 if (slot != nritems) {
3916 unsigned int old_data = btrfs_item_data_end(leaf, slot);
3918 if (old_data < data_end) {
3919 btrfs_print_leaf(leaf);
3921 "item at slot %d with data offset %u beyond data end of leaf %u",
3922 slot, old_data, data_end);
3926 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3928 /* first correct the data pointers */
3929 for (i = slot; i < nritems; i++) {
3932 ioff = btrfs_token_item_offset(&token, i);
3933 btrfs_set_token_item_offset(&token, i,
3934 ioff - batch->total_data_size);
3936 /* shift the items */
3937 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + batch->nr),
3938 btrfs_item_nr_offset(slot),
3939 (nritems - slot) * sizeof(struct btrfs_item));
3941 /* shift the data */
3942 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
3943 data_end - batch->total_data_size,
3944 BTRFS_LEAF_DATA_OFFSET + data_end,
3945 old_data - data_end);
3946 data_end = old_data;
3949 /* setup the item for the new data */
3950 for (i = 0; i < batch->nr; i++) {
3951 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
3952 btrfs_set_item_key(leaf, &disk_key, slot + i);
3953 data_end -= batch->data_sizes[i];
3954 btrfs_set_token_item_offset(&token, slot + i, data_end);
3955 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
3958 btrfs_set_header_nritems(leaf, nritems + batch->nr);
3959 btrfs_mark_buffer_dirty(leaf);
3961 if (btrfs_leaf_free_space(leaf) < 0) {
3962 btrfs_print_leaf(leaf);
3968 * Insert a new item into a leaf.
3970 * @root: The root of the btree.
3971 * @path: A path pointing to the target leaf and slot.
3972 * @key: The key of the new item.
3973 * @data_size: The size of the data associated with the new key.
3975 void btrfs_setup_item_for_insert(struct btrfs_root *root,
3976 struct btrfs_path *path,
3977 const struct btrfs_key *key,
3980 struct btrfs_item_batch batch;
3983 batch.data_sizes = &data_size;
3984 batch.total_data_size = data_size;
3987 setup_items_for_insert(root, path, &batch);
3991 * Given a key and some data, insert items into the tree.
3992 * This does all the path init required, making room in the tree if needed.
3994 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3995 struct btrfs_root *root,
3996 struct btrfs_path *path,
3997 const struct btrfs_item_batch *batch)
4003 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4004 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
4010 slot = path->slots[0];
4013 setup_items_for_insert(root, path, batch);
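/*
 * Illustrative sketch, not part of the original source: reserving space for
 * two items with a single tree search by filling a btrfs_item_batch.  The
 * helper name is an assumption; real callers follow up by writing the item
 * data with write_extent_buffer() and dirtying the leaf.
 */
static inline int example_insert_two_items(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   const struct btrfs_key keys[2],
					   const u32 sizes[2])
{
	struct btrfs_item_batch batch;

	batch.keys = keys;
	batch.data_sizes = sizes;
	batch.total_data_size = sizes[0] + sizes[1];
	batch.nr = 2;

	/* on success path->slots[0] points at the first reserved item */
	return btrfs_insert_empty_items(trans, root, path, &batch);
}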
4018 * Given a key and some data, insert an item into the tree.
4019 * This does all the path init required, making room in the tree if needed.
4021 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4022 const struct btrfs_key *cpu_key, void *data,
4026 struct btrfs_path *path;
4027 struct extent_buffer *leaf;
4030 path = btrfs_alloc_path();
4033 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4035 leaf = path->nodes[0];
4036 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4037 write_extent_buffer(leaf, data, ptr, data_size);
4038 btrfs_mark_buffer_dirty(leaf);
4040 btrfs_free_path(path);
4045 * This function duplicates an item, giving 'new_key' to the new item.
4046 * It guarantees both items live in the same tree leaf and the new item is
4047 * contiguous with the original item.
4049 * This allows us to split a file extent in place, keeping a lock on the leaf
4052 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4053 struct btrfs_root *root,
4054 struct btrfs_path *path,
4055 const struct btrfs_key *new_key)
4057 struct extent_buffer *leaf;
4061 leaf = path->nodes[0];
4062 item_size = btrfs_item_size(leaf, path->slots[0]);
4063 ret = setup_leaf_for_split(trans, root, path,
4064 item_size + sizeof(struct btrfs_item));
4069 btrfs_setup_item_for_insert(root, path, new_key, item_size);
4070 leaf = path->nodes[0];
4071 memcpy_extent_buffer(leaf,
4072 btrfs_item_ptr_offset(leaf, path->slots[0]),
4073 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
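/*
 * Illustrative sketch, not part of the original source: duplicating the item
 * the path points at under a new key, the way file extents are split.  The
 * choice of key (same objectid/type, new offset) is an example only.
 */
static inline int example_duplicate_at_offset(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_path *path,
					      u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/* on success the path points at the freshly inserted copy */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}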
4079 * delete the pointer from a given node.
4081 * the tree should have been previously balanced so the deletion does not
4084 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4085 int level, int slot)
4087 struct extent_buffer *parent = path->nodes[level];
4091 nritems = btrfs_header_nritems(parent);
4092 if (slot != nritems - 1) {
4094 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4095 slot + 1, nritems - slot - 1);
4098 memmove_extent_buffer(parent,
4099 btrfs_node_key_ptr_offset(slot),
4100 btrfs_node_key_ptr_offset(slot + 1),
4101 sizeof(struct btrfs_key_ptr) *
4102 (nritems - slot - 1));
4104 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4105 BTRFS_MOD_LOG_KEY_REMOVE, GFP_NOFS);
4110 btrfs_set_header_nritems(parent, nritems);
4111 if (nritems == 0 && parent == root->node) {
4112 BUG_ON(btrfs_header_level(root->node) != 1);
4113 /* just turn the root into a leaf and break */
4114 btrfs_set_header_level(root->node, 0);
4115 } else if (slot == 0) {
4116 struct btrfs_disk_key disk_key;
4118 btrfs_node_key(parent, &disk_key, 0);
4119 fixup_low_keys(path, &disk_key, level + 1);
4121 btrfs_mark_buffer_dirty(parent);
4125 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4128 * This deletes the pointer in path->nodes[1] and frees the leaf
4129 * block extent. zero is returned if it all worked out, < 0 otherwise.
4131 * The path must have already been setup for deleting the leaf, including
4132 * all the proper balancing. path->nodes[1] must be locked.
4134 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4135 struct btrfs_root *root,
4136 struct btrfs_path *path,
4137 struct extent_buffer *leaf)
4139 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4140 del_ptr(root, path, 1, path->slots[1]);
4143 * btrfs_free_extent is expensive, we want to make sure we
4144 * aren't holding any locks when we call it
4146 btrfs_unlock_up_safe(path, 0);
4148 root_sub_used(root, leaf->len);
4150 atomic_inc(&leaf->refs);
4151 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4152 free_extent_buffer_stale(leaf);
4155 * delete the item at the leaf level in path. If that empties
4156 * the leaf, remove it from the tree
4158 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4159 struct btrfs_path *path, int slot, int nr)
4161 struct btrfs_fs_info *fs_info = root->fs_info;
4162 struct extent_buffer *leaf;
4167 leaf = path->nodes[0];
4168 nritems = btrfs_header_nritems(leaf);
4170 if (slot + nr != nritems) {
4171 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
4172 const int data_end = leaf_data_end(leaf);
4173 struct btrfs_map_token token;
4177 for (i = 0; i < nr; i++)
4178 dsize += btrfs_item_size(leaf, slot + i);
4180 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4182 BTRFS_LEAF_DATA_OFFSET + data_end,
4183 last_off - data_end);
4185 btrfs_init_map_token(&token, leaf);
4186 for (i = slot + nr; i < nritems; i++) {
4189 ioff = btrfs_token_item_offset(&token, i);
4190 btrfs_set_token_item_offset(&token, i, ioff + dsize);
4193 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4194 btrfs_item_nr_offset(slot + nr),
4195 sizeof(struct btrfs_item) *
4196 (nritems - slot - nr));
4198 btrfs_set_header_nritems(leaf, nritems - nr);
4201 /* delete the leaf if we've emptied it */
4203 if (leaf == root->node) {
4204 btrfs_set_header_level(leaf, 0);
4206 btrfs_clean_tree_block(leaf);
4207 btrfs_del_leaf(trans, root, path, leaf);
4210 int used = leaf_space_used(leaf, 0, nritems);
4212 struct btrfs_disk_key disk_key;
4214 btrfs_item_key(leaf, &disk_key, 0);
4215 fixup_low_keys(path, &disk_key, 1);
4219 * Try to delete the leaf if it is mostly empty. We do this by
4220 * trying to move all its items into its left and right neighbours.
4221 * If we can't move all the items, then we don't delete it - it's
4222 * not ideal, but future insertions might fill the leaf with more
4223 * items, or items from other leaves might be moved later into our
4224 * leaf due to deletions on those leaves.
4226 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4229 /* push_leaf_left fixes the path.
4230 * make sure the path still points to our leaf
4231 * for possible call to del_ptr below
4233 slot = path->slots[1];
4234 atomic_inc(&leaf->refs);
4236 * We want to be able to at least push one item to the
4237 * left neighbour leaf, and that's the first item.
4239 min_push_space = sizeof(struct btrfs_item) +
4240 btrfs_item_size(leaf, 0);
4241 wret = push_leaf_left(trans, root, path, 0,
4242 min_push_space, 1, (u32)-1);
4243 if (wret < 0 && wret != -ENOSPC)
4246 if (path->nodes[0] == leaf &&
4247 btrfs_header_nritems(leaf)) {
4249 * If we were not able to push all items from our
4250 * leaf to its left neighbour, then attempt to
4251 * either push all the remaining items to the
4252 * right neighbour or none. There's no advantage
4253 * in pushing only some items, instead of all, as
4254 * it's pointless to end up with a leaf having
4255 * too few items while the neighbours can be full
4258 nritems = btrfs_header_nritems(leaf);
4259 min_push_space = leaf_space_used(leaf, 0, nritems);
4260 wret = push_leaf_right(trans, root, path, 0,
4261 min_push_space, 1, 0);
4262 if (wret < 0 && wret != -ENOSPC)
4266 if (btrfs_header_nritems(leaf) == 0) {
4267 path->slots[1] = slot;
4268 btrfs_del_leaf(trans, root, path, leaf);
4269 free_extent_buffer(leaf);
4272 /* if we're still in the path, make sure
4273 * we're dirty. Otherwise, one of the
4274 * push_leaf functions must have already
4275 * dirtied this buffer
4277 if (path->nodes[0] == leaf)
4278 btrfs_mark_buffer_dirty(leaf);
4279 free_extent_buffer(leaf);
4282 btrfs_mark_buffer_dirty(leaf);
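/*
 * Illustrative sketch, not part of the original source: the common
 * single-item deletion pattern built on btrfs_del_items(), mirroring the
 * btrfs_del_item() wrapper.  The helper name is an assumption.
 */
static inline int example_del_current_item(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path)
{
	/* delete exactly the slot the previous search left us on */
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}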
4289 * search the tree again to find a leaf with lesser keys
4290 * returns 0 if it found something or 1 if there are no lesser leaves.
4291 * returns < 0 on io errors.
4293 * This may release the path, and so you may lose any locks held at the time you call it.
4296 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4298 struct btrfs_key key;
4299 struct btrfs_disk_key found_key;
4302 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4304 if (key.offset > 0) {
4306 } else if (key.type > 0) {
4308 key.offset = (u64)-1;
4309 } else if (key.objectid > 0) {
4312 key.offset = (u64)-1;
4317 btrfs_release_path(path);
4318 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4321 btrfs_item_key(path->nodes[0], &found_key, 0);
4322 ret = comp_keys(&found_key, &key);
4324 * We might have had an item with the previous key in the tree right
4325 * before we released our path. And after we released our path, that
4326 * item might have been pushed to the first slot (0) of the leaf we
4327 * were holding due to a tree balance. Alternatively, an item with the
4328 * previous key can exist as the only element of a leaf (big fat item).
4329 * Therefore account for these 2 cases, so that our callers (like
4330 * btrfs_previous_item) don't miss an existing item with a key matching
4331 * the previous key we computed above.
4339 * A helper function to walk down the tree starting at min_key, and looking
4340 * for nodes or leaves that have a minimum transaction id.
4341 * This is used by the btree defrag code, and tree logging
4343 * This does not cow, but it does stuff the starting key it finds back
4344 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4345 * key and get a writable path.
4347 * This honors path->lowest_level to prevent descent past a given level
4350 * min_trans indicates the oldest transaction that you are interested
4351 * in walking through. Any nodes or leaves older than min_trans are
4352 * skipped over (without reading them).
4354 * returns zero if something useful was found, < 0 on error and 1 if there
4355 * was nothing in the tree that matched the search criteria.
4357 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4358 struct btrfs_path *path,
4361 struct extent_buffer *cur;
4362 struct btrfs_key found_key;
4368 int keep_locks = path->keep_locks;
4370 path->keep_locks = 1;
4372 cur = btrfs_read_lock_root_node(root);
4373 level = btrfs_header_level(cur);
4374 WARN_ON(path->nodes[level]);
4375 path->nodes[level] = cur;
4376 path->locks[level] = BTRFS_READ_LOCK;
4378 if (btrfs_header_generation(cur) < min_trans) {
4383 nritems = btrfs_header_nritems(cur);
4384 level = btrfs_header_level(cur);
4385 sret = btrfs_bin_search(cur, min_key, &slot);
4391 /* at the lowest level, we're done, setup the path and exit */
4392 if (level == path->lowest_level) {
4393 if (slot >= nritems)
4396 path->slots[level] = slot;
4397 btrfs_item_key_to_cpu(cur, &found_key, slot);
4400 if (sret && slot > 0)
4403 * check this node pointer against the min_trans parameters.
4404 * If it is too old, skip to the next one.
4406 while (slot < nritems) {
4409 gen = btrfs_node_ptr_generation(cur, slot);
4410 if (gen < min_trans) {
4418 * we didn't find a candidate key in this node, walk forward
4419 * and find another one
4421 if (slot >= nritems) {
4422 path->slots[level] = slot;
4423 sret = btrfs_find_next_key(root, path, min_key, level,
4426 btrfs_release_path(path);
4432 /* save our key for returning */
4433 btrfs_node_key_to_cpu(cur, &found_key, slot);
4434 path->slots[level] = slot;
4435 if (level == path->lowest_level) {
4439 cur = btrfs_read_node_slot(cur, slot);
4445 btrfs_tree_read_lock(cur);
4447 path->locks[level - 1] = BTRFS_READ_LOCK;
4448 path->nodes[level - 1] = cur;
4449 unlock_up(path, level, 1, 0, NULL);
4452 path->keep_locks = keep_locks;
4454 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4455 memcpy(min_key, &found_key, sizeof(found_key));
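/*
 * Illustrative sketch, not part of the original source: a scan loop in the
 * style of the defrag/logging users of btrfs_search_forward().  Advancing by
 * bumping key.offset is a simplification; real callers also roll over into
 * key.type and key.objectid.
 */
static inline int example_scan_newer_than(struct btrfs_root *root,
					  u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };	/* start from the smallest key */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, min_trans);
		if (ret)	/* < 0 on error, 1 when nothing is left */
			break;

		/* ... inspect the item 'key' now describes ... */

		btrfs_release_path(path);
		if (key.offset == (u64)-1)
			break;
		key.offset++;	/* simplified advance, see note above */
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}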
4461 * this is similar to btrfs_next_leaf, but does not try to preserve
4462 * and fixup the path. It looks for and returns the next key in the
4463 * tree based on the current path and the min_trans parameters.
4465 * 0 is returned if another key is found, < 0 if there are any errors
4466 * and 1 is returned if there are no higher keys in the tree
4468 * path->keep_locks should be set to 1 on the search made before
4469 * calling this function.
4471 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4472 struct btrfs_key *key, int level, u64 min_trans)
4475 struct extent_buffer *c;
4477 WARN_ON(!path->keep_locks && !path->skip_locking);
4478 while (level < BTRFS_MAX_LEVEL) {
4479 if (!path->nodes[level])
4482 slot = path->slots[level] + 1;
4483 c = path->nodes[level];
4485 if (slot >= btrfs_header_nritems(c)) {
4488 struct btrfs_key cur_key;
4489 if (level + 1 >= BTRFS_MAX_LEVEL ||
4490 !path->nodes[level + 1])
4493 if (path->locks[level + 1] || path->skip_locking) {
4498 slot = btrfs_header_nritems(c) - 1;
4500 btrfs_item_key_to_cpu(c, &cur_key, slot);
4502 btrfs_node_key_to_cpu(c, &cur_key, slot);
4504 orig_lowest = path->lowest_level;
4505 btrfs_release_path(path);
4506 path->lowest_level = level;
4507 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4509 path->lowest_level = orig_lowest;
4513 c = path->nodes[level];
4514 slot = path->slots[level];
4521 btrfs_item_key_to_cpu(c, key, slot);
4523 u64 gen = btrfs_node_ptr_generation(c, slot);
4525 if (gen < min_trans) {
4529 btrfs_node_key_to_cpu(c, key, slot);
4536 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4541 struct extent_buffer *c;
4542 struct extent_buffer *next;
4543 struct btrfs_fs_info *fs_info = root->fs_info;
4544 struct btrfs_key key;
4545 bool need_commit_sem = false;
4550 nritems = btrfs_header_nritems(path->nodes[0]);
4554 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4558 btrfs_release_path(path);
4560 path->keep_locks = 1;
4563 ret = btrfs_search_old_slot(root, &key, path, time_seq);
4565 if (path->need_commit_sem) {
4566 path->need_commit_sem = 0;
4567 need_commit_sem = true;
4568 down_read(&fs_info->commit_root_sem);
4570 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4572 path->keep_locks = 0;
4577 nritems = btrfs_header_nritems(path->nodes[0]);
4579 * by releasing the path above we dropped all our locks. A balance
4580 * could have added more items next to the key that used to be
4581 * at the very end of the block. So, check again here and
4582 * advance the path if there are now more items available.
4584 if (nritems > 0 && path->slots[0] < nritems - 1) {
4591 * So the above check misses one case:
4592 * - after releasing the path above, someone has removed the item that
4593 * used to be at the very end of the block, and balance between leaves
4594 * gets another one with bigger key.offset to replace it.
4596 * This one should be returned as well, or we can get leaf corruption
4597 * later (esp. in __btrfs_drop_extents()).
4599 * And a bit more explanation about this check:
4600 * with ret > 0, the key isn't found, the path points to the slot
4601 * where it should be inserted, so the path->slots[0] item must be the bigger one.
4604 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4609 while (level < BTRFS_MAX_LEVEL) {
4610 if (!path->nodes[level]) {
4615 slot = path->slots[level] + 1;
4616 c = path->nodes[level];
4617 if (slot >= btrfs_header_nritems(c)) {
4619 if (level == BTRFS_MAX_LEVEL) {
4628 * Our current level is where we're going to start from, and to
4629 * make sure lockdep doesn't complain we need to drop our locks
4630 * and nodes from 0 to our current level.
4632 for (i = 0; i < level; i++) {
4633 if (path->locks[level]) {
4634 btrfs_tree_read_unlock(path->nodes[i]);
4637 free_extent_buffer(path->nodes[i]);
4638 path->nodes[i] = NULL;
4642 ret = read_block_for_search(root, path, &next, level,
4648 btrfs_release_path(path);
4652 if (!path->skip_locking) {
4653 ret = btrfs_try_tree_read_lock(next);
4654 if (!ret && time_seq) {
4656 * If we don't get the lock, we may be racing
4657 * with push_leaf_left, holding that lock while
4658 * itself waiting for the leaf we've currently
4659 * locked. To solve this situation, we give up
4660 * on our lock and cycle.
4662 free_extent_buffer(next);
4663 btrfs_release_path(path);
4668 btrfs_tree_read_lock(next);
4672 path->slots[level] = slot;
4675 path->nodes[level] = next;
4676 path->slots[level] = 0;
4677 if (!path->skip_locking)
4678 path->locks[level] = BTRFS_READ_LOCK;
4682 ret = read_block_for_search(root, path, &next, level,
4688 btrfs_release_path(path);
4692 if (!path->skip_locking)
4693 btrfs_tree_read_lock(next);
4697 unlock_up(path, 0, 1, 0, NULL);
4698 if (need_commit_sem) {
4701 path->need_commit_sem = 1;
4702 ret2 = finish_need_commit_sem_search(path);
4703 up_read(&fs_info->commit_root_sem);
4712 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4713 * searching until it gets past min_objectid or finds an item of 'type'
4715 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4717 int btrfs_previous_item(struct btrfs_root *root,
4718 struct btrfs_path *path, u64 min_objectid,
4721 struct btrfs_key found_key;
4722 struct extent_buffer *leaf;
4727 if (path->slots[0] == 0) {
4728 ret = btrfs_prev_leaf(root, path);
4734 leaf = path->nodes[0];
4735 nritems = btrfs_header_nritems(leaf);
4738 if (path->slots[0] == nritems)
4741 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4742 if (found_key.objectid < min_objectid)
4744 if (found_key.type == type)
4746 if (found_key.objectid == min_objectid &&
4747 found_key.type < type)
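/*
 * Illustrative sketch, not part of the original source: stepping backwards
 * from the current path position to the closest preceding item of one type.
 * BTRFS_EXTENT_DATA_KEY is only an example; any item type works.
 */
static inline int example_prev_file_extent(struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 objectid)
{
	/* 0: path now points at the match, 1: nothing found, < 0: error */
	return btrfs_previous_item(root, path, objectid,
				   BTRFS_EXTENT_DATA_KEY);
}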
4754 * search in extent tree to find a previous Metadata/Data extent item with min objectid.
4757 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4759 int btrfs_previous_extent_item(struct btrfs_root *root,
4760 struct btrfs_path *path, u64 min_objectid)
4762 struct btrfs_key found_key;
4763 struct extent_buffer *leaf;
4768 if (path->slots[0] == 0) {
4769 ret = btrfs_prev_leaf(root, path);
4775 leaf = path->nodes[0];
4776 nritems = btrfs_header_nritems(leaf);
4779 if (path->slots[0] == nritems)
4782 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4783 if (found_key.objectid < min_objectid)
4785 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
4786 found_key.type == BTRFS_METADATA_ITEM_KEY)
4788 if (found_key.objectid == min_objectid &&
4789 found_key.type < BTRFS_EXTENT_ITEM_KEY)