2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
41 struct btrfs_path *path, int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
44 struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
45 u32 blocksize, u64 parent_transid,
47 struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
48 u64 bytenr, u32 blocksize,
51 struct btrfs_path *btrfs_alloc_path(void)
53 struct btrfs_path *path;
54 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
59 * set all locked nodes in the path to blocking locks. This should
60 * be done before scheduling
62 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
65 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
66 if (!p->nodes[i] || !p->locks[i])
68 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
69 if (p->locks[i] == BTRFS_READ_LOCK)
70 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
71 else if (p->locks[i] == BTRFS_WRITE_LOCK)
72 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
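/*
 * Typical pairing of the helper above with btrfs_clear_path_blocking()
 * below (illustrative sketch only; the same pattern is used around
 * split_node() and balance_level() later in this file):
 *
 *	btrfs_set_path_blocking(path);
 *	... work that may schedule, e.g. reading a block from disk ...
 *	btrfs_clear_path_blocking(path, NULL, 0);
 */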
77 * reset all the locked nodes in the path to spinning locks.
79 * held is used to keep lockdep happy, when lockdep is enabled
80 * we set held to a blocking lock before we go around and
81 * retake all the spinlocks in the path. You can safely use NULL
84 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
85 struct extent_buffer *held, int held_rw)
89 #ifdef CONFIG_DEBUG_LOCK_ALLOC
90 /* lockdep really cares that we take all of these spinlocks
91 * in the right order. If any of the locks in the path are not
92 * currently blocking, it is going to complain. So, make really
93 * really sure by forcing the path to blocking before we clear
97 btrfs_set_lock_blocking_rw(held, held_rw);
98 if (held_rw == BTRFS_WRITE_LOCK)
99 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
100 else if (held_rw == BTRFS_READ_LOCK)
101 held_rw = BTRFS_READ_LOCK_BLOCKING;
103 btrfs_set_path_blocking(p);
106 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
107 if (p->nodes[i] && p->locks[i]) {
108 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
109 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
110 p->locks[i] = BTRFS_WRITE_LOCK;
111 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
112 p->locks[i] = BTRFS_READ_LOCK;
116 #ifdef CONFIG_DEBUG_LOCK_ALLOC
118 btrfs_clear_lock_blocking_rw(held, held_rw);
122 /* this also releases the path */
123 void btrfs_free_path(struct btrfs_path *p)
127 btrfs_release_path(p);
128 kmem_cache_free(btrfs_path_cachep, p);
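/*
 * A minimal usage sketch for the path helpers above (illustrative only;
 * the key values are made up and error handling is elided):
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	... on ret == 0 the item is at path->nodes[0], path->slots[0] ...
 *	btrfs_free_path(path);     this also drops locks and references
 */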
132 * path release drops references on the extent buffers in the path
133 * and it drops any locks held by this path
135 * It is safe to call this on paths that have no locks or extent buffers held.
137 noinline void btrfs_release_path(struct btrfs_path *p)
141 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
146 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
149 free_extent_buffer(p->nodes[i]);
155 * safely gets a reference on the root node of a tree. A lock
156 * is not taken, so a concurrent writer may put a different node
157 * at the root of the tree. See btrfs_lock_root_node for the
160 * The extent buffer returned by this has a reference taken, so
161 * it won't disappear. It may stop being the root of the tree
162 * at any time because there are no locks held.
164 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
166 struct extent_buffer *eb;
170 eb = rcu_dereference(root->node);
173 * RCU really hurts here, we could free up the root node because
174 * it was cow'ed but we may not get the new root node yet so do
175 * the inc_not_zero dance and if it doesn't work then
176 * synchronize_rcu and try again.
178 if (atomic_inc_not_zero(&eb->refs)) {
188 /* loop around taking references on and locking the root node of the
189 * tree until you end up with a lock on the root. A locked buffer
190 * is returned, with a reference held.
192 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
194 struct extent_buffer *eb;
197 eb = btrfs_root_node(root);
199 if (eb == root->node)
201 btrfs_tree_unlock(eb);
202 free_extent_buffer(eb);
207 /* loop around taking references on and locking the root node of the
208 * tree until you end up with a lock on the root. A locked buffer
209 * is returned, with a reference held.
211 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
213 struct extent_buffer *eb;
216 eb = btrfs_root_node(root);
217 btrfs_tree_read_lock(eb);
218 if (eb == root->node)
220 btrfs_tree_read_unlock(eb);
221 free_extent_buffer(eb);
226 /* cowonly roots (everything that is not a reference counted cow subvolume) just get
227 * put onto a simple dirty list. transaction.c walks this list to make sure they
228 * get properly updated on disk.
230 static void add_root_to_dirty_list(struct btrfs_root *root)
232 spin_lock(&root->fs_info->trans_lock);
233 if (root->track_dirty && list_empty(&root->dirty_list)) {
234 list_add(&root->dirty_list,
235 &root->fs_info->dirty_cowonly_roots);
237 spin_unlock(&root->fs_info->trans_lock);
241 * used by snapshot creation to make a copy of a root for a tree with
242 * a given objectid. The buffer with the new root node is returned in
243 * cow_ret, and this func returns zero on success or a negative error code.
245 int btrfs_copy_root(struct btrfs_trans_handle *trans,
246 struct btrfs_root *root,
247 struct extent_buffer *buf,
248 struct extent_buffer **cow_ret, u64 new_root_objectid)
250 struct extent_buffer *cow;
253 struct btrfs_disk_key disk_key;
255 WARN_ON(root->ref_cows && trans->transid !=
256 root->fs_info->running_transaction->transid);
257 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
259 level = btrfs_header_level(buf);
261 btrfs_item_key(buf, &disk_key, 0);
263 btrfs_node_key(buf, &disk_key, 0);
265 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
266 new_root_objectid, &disk_key, level,
271 copy_extent_buffer(cow, buf, 0, 0, cow->len);
272 btrfs_set_header_bytenr(cow, cow->start);
273 btrfs_set_header_generation(cow, trans->transid);
274 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
275 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
276 BTRFS_HEADER_FLAG_RELOC);
277 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
278 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
280 btrfs_set_header_owner(cow, new_root_objectid);
282 write_extent_buffer(cow, root->fs_info->fsid,
283 (unsigned long)btrfs_header_fsid(cow),
286 WARN_ON(btrfs_header_generation(buf) > trans->transid);
287 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
288 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
290 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
295 btrfs_mark_buffer_dirty(cow);
304 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
305 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
307 MOD_LOG_ROOT_REPLACE,
310 struct tree_mod_move {
315 struct tree_mod_root {
320 struct tree_mod_elem {
322 u64 index; /* shifted logical */
326 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
329 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
332 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
333 struct btrfs_disk_key key;
336 /* this is used for op == MOD_LOG_MOVE_KEYS */
337 struct tree_mod_move move;
339 /* this is used for op == MOD_LOG_ROOT_REPLACE */
340 struct tree_mod_root old_root;
343 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
345 read_lock(&fs_info->tree_mod_log_lock);
348 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
350 read_unlock(&fs_info->tree_mod_log_lock);
353 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
355 write_lock(&fs_info->tree_mod_log_lock);
358 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
360 write_unlock(&fs_info->tree_mod_log_lock);
364 * This adds a new blocker to the tree mod log's blocker list if the @elem
365 * passed does not already have a sequence number set. So when a caller expects
366 * to record tree modifications, it should ensure to set elem->seq to zero
367 * before calling btrfs_get_tree_mod_seq.
368 * Returns a fresh, unused tree log modification sequence number, even if no new blocker was added.
371 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
372 struct seq_list *elem)
376 tree_mod_log_write_lock(fs_info);
377 spin_lock(&fs_info->tree_mod_seq_lock);
379 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
380 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
382 seq = btrfs_inc_tree_mod_seq(fs_info);
383 spin_unlock(&fs_info->tree_mod_seq_lock);
384 tree_mod_log_write_unlock(fs_info);
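/*
 * Illustrative pairing of the get/put functions (assuming a caller such as
 * the backref walking code, which lives outside this file):
 *
 *	struct seq_list elem = {};      elem.seq starts out as zero
 *	u64 seq;
 *
 *	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use seq as time_seq for tree mod log searches ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * While elem is linked it acts as a blocker: only log entries with a
 * sequence number lower than every live blocker may be removed.
 */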
389 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
390 struct seq_list *elem)
392 struct rb_root *tm_root;
393 struct rb_node *node;
394 struct rb_node *next;
395 struct seq_list *cur_elem;
396 struct tree_mod_elem *tm;
397 u64 min_seq = (u64)-1;
398 u64 seq_putting = elem->seq;
403 spin_lock(&fs_info->tree_mod_seq_lock);
404 list_del(&elem->list);
407 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
408 if (cur_elem->seq < min_seq) {
409 if (seq_putting > cur_elem->seq) {
411 * blocker with lower sequence number exists, we
412 * cannot remove anything from the log
414 spin_unlock(&fs_info->tree_mod_seq_lock);
417 min_seq = cur_elem->seq;
420 spin_unlock(&fs_info->tree_mod_seq_lock);
423 * anything that's lower than the lowest existing (read: blocked)
424 * sequence number can be removed from the tree.
426 tree_mod_log_write_lock(fs_info);
427 tm_root = &fs_info->tree_mod_log;
428 for (node = rb_first(tm_root); node; node = next) {
429 next = rb_next(node);
430 tm = container_of(node, struct tree_mod_elem, node);
431 if (tm->seq > min_seq)
433 rb_erase(node, tm_root);
436 tree_mod_log_write_unlock(fs_info);
440 * key order of the log:
443 * the index is the shifted logical of the *new* root node for root replace
444 * operations, or the shifted logical of the affected block for all other operations.
448 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
450 struct rb_root *tm_root;
451 struct rb_node **new;
452 struct rb_node *parent = NULL;
453 struct tree_mod_elem *cur;
455 BUG_ON(!tm || !tm->seq);
457 tm_root = &fs_info->tree_mod_log;
458 new = &tm_root->rb_node;
460 cur = container_of(*new, struct tree_mod_elem, node);
462 if (cur->index < tm->index)
463 new = &((*new)->rb_left);
464 else if (cur->index > tm->index)
465 new = &((*new)->rb_right);
466 else if (cur->seq < tm->seq)
467 new = &((*new)->rb_left);
468 else if (cur->seq > tm->seq)
469 new = &((*new)->rb_right);
476 rb_link_node(&tm->node, parent, new);
477 rb_insert_color(&tm->node, tm_root);
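/*
 * Worked example of the ordering above (values made up): entries
 * {index=5, seq=100}, {index=5, seq=103} and {index=9, seq=101} are keyed
 * first by index and then by seq, so both index-5 entries end up adjacent
 * in the rb-tree. That is what allows __tree_mod_log_search() below to
 * walk every logged modification of a single block.
 */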
482 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
483 * returns zero with the tree_mod_log_lock acquired. The caller must hold
484 * this until all tree mod log insertions are recorded in the rb tree and then
485 * call tree_mod_log_write_unlock() to release.
487 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
488 struct extent_buffer *eb) {
490 if (list_empty(&(fs_info)->tree_mod_seq_list))
492 if (eb && btrfs_header_level(eb) == 0)
495 tree_mod_log_write_lock(fs_info);
496 if (list_empty(&fs_info->tree_mod_seq_list)) {
498 * someone emptied the list while we were waiting for the lock.
499 * we must not add to the list when no blocker exists.
501 tree_mod_log_write_unlock(fs_info);
509 * This allocates memory and gets a tree modification sequence number.
511 * Returns <0 on error.
512 * Returns >0 (the added sequence number) on success.
514 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
515 struct tree_mod_elem **tm_ret)
517 struct tree_mod_elem *tm;
520 * once we switch from spin locks to something different, we should
521 * honor the flags parameter here.
523 tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
527 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
532 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
533 struct extent_buffer *eb, int slot,
534 enum mod_log_op op, gfp_t flags)
537 struct tree_mod_elem *tm;
539 ret = tree_mod_alloc(fs_info, flags, &tm);
543 tm->index = eb->start >> PAGE_CACHE_SHIFT;
544 if (op != MOD_LOG_KEY_ADD) {
545 btrfs_node_key(eb, &tm->key, slot);
546 tm->blockptr = btrfs_node_blockptr(eb, slot);
550 tm->generation = btrfs_node_ptr_generation(eb, slot);
552 return __tree_mod_log_insert(fs_info, tm);
556 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
557 struct extent_buffer *eb, int slot,
558 enum mod_log_op op, gfp_t flags)
562 if (tree_mod_dont_log(fs_info, eb))
565 ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
567 tree_mod_log_write_unlock(fs_info);
572 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
573 int slot, enum mod_log_op op)
575 return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
579 tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
580 struct extent_buffer *eb, int slot,
583 return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
587 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
588 struct extent_buffer *eb, int dst_slot, int src_slot,
589 int nr_items, gfp_t flags)
591 struct tree_mod_elem *tm;
595 if (tree_mod_dont_log(fs_info, eb))
599 * When we overwrite something during the move, we log these removals.
600 * This can only happen when we move towards the beginning of the
601 * buffer, i.e. dst_slot < src_slot.
603 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
604 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
605 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
609 ret = tree_mod_alloc(fs_info, flags, &tm);
613 tm->index = eb->start >> PAGE_CACHE_SHIFT;
615 tm->move.dst_slot = dst_slot;
616 tm->move.nr_items = nr_items;
617 tm->op = MOD_LOG_MOVE_KEYS;
619 ret = __tree_mod_log_insert(fs_info, tm);
621 tree_mod_log_write_unlock(fs_info);
626 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
632 if (btrfs_header_level(eb) == 0)
635 nritems = btrfs_header_nritems(eb);
636 for (i = nritems - 1; i >= 0; i--) {
637 ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
638 MOD_LOG_KEY_REMOVE_WHILE_FREEING);
644 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
645 struct extent_buffer *old_root,
646 struct extent_buffer *new_root, gfp_t flags)
648 struct tree_mod_elem *tm;
651 if (tree_mod_dont_log(fs_info, NULL))
654 ret = tree_mod_alloc(fs_info, flags, &tm);
658 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
659 tm->old_root.logical = old_root->start;
660 tm->old_root.level = btrfs_header_level(old_root);
661 tm->generation = btrfs_header_generation(old_root);
662 tm->op = MOD_LOG_ROOT_REPLACE;
664 ret = __tree_mod_log_insert(fs_info, tm);
666 tree_mod_log_write_unlock(fs_info);
670 static struct tree_mod_elem *
671 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
674 struct rb_root *tm_root;
675 struct rb_node *node;
676 struct tree_mod_elem *cur = NULL;
677 struct tree_mod_elem *found = NULL;
678 u64 index = start >> PAGE_CACHE_SHIFT;
680 tree_mod_log_read_lock(fs_info);
681 tm_root = &fs_info->tree_mod_log;
682 node = tm_root->rb_node;
684 cur = container_of(node, struct tree_mod_elem, node);
685 if (cur->index < index) {
686 node = node->rb_left;
687 } else if (cur->index > index) {
688 node = node->rb_right;
689 } else if (cur->seq < min_seq) {
690 node = node->rb_left;
691 } else if (!smallest) {
692 /* we want the node with the highest seq */
694 BUG_ON(found->seq > cur->seq);
696 node = node->rb_left;
697 } else if (cur->seq > min_seq) {
698 /* we want the node with the smallest seq */
700 BUG_ON(found->seq < cur->seq);
702 node = node->rb_right;
708 tree_mod_log_read_unlock(fs_info);
714 * this returns the element from the log with the smallest time sequence
715 * value that's in the log (the oldest log item). any element with a time
716 * sequence lower than min_seq will be ignored.
718 static struct tree_mod_elem *
719 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
722 return __tree_mod_log_search(fs_info, start, min_seq, 1);
726 * this returns the element from the log with the largest time sequence
727 * value that's in the log (the most recent log item). any element with
728 * a time sequence lower than min_seq will be ignored.
730 static struct tree_mod_elem *
731 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
733 return __tree_mod_log_search(fs_info, start, min_seq, 0);
737 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
738 struct extent_buffer *src, unsigned long dst_offset,
739 unsigned long src_offset, int nr_items)
744 if (tree_mod_dont_log(fs_info, NULL))
747 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
748 tree_mod_log_write_unlock(fs_info);
752 for (i = 0; i < nr_items; i++) {
753 ret = tree_mod_log_insert_key_locked(fs_info, src,
757 ret = tree_mod_log_insert_key_locked(fs_info, dst,
763 tree_mod_log_write_unlock(fs_info);
767 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
768 int dst_offset, int src_offset, int nr_items)
771 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
777 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
778 struct extent_buffer *eb, int slot, int atomic)
782 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
784 atomic ? GFP_ATOMIC : GFP_NOFS);
789 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
791 if (tree_mod_dont_log(fs_info, eb))
794 __tree_mod_log_free_eb(fs_info, eb);
796 tree_mod_log_write_unlock(fs_info);
800 tree_mod_log_set_root_pointer(struct btrfs_root *root,
801 struct extent_buffer *new_root_node)
804 ret = tree_mod_log_insert_root(root->fs_info, root->node,
805 new_root_node, GFP_NOFS);
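/*
 * Illustrative summary of the locking contract the insertion wrappers above
 * follow (see the comment at tree_mod_dont_log()):
 *
 *	if (tree_mod_dont_log(fs_info, eb))
 *		return 0;               nothing to record, lock not held
 *	ret = ...insert one or more tree_mod_elems under the write lock...
 *	tree_mod_log_write_unlock(fs_info);
 *	return ret;
 */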
810 * check if the tree block can be shared by multiple trees
812 int btrfs_block_can_be_shared(struct btrfs_root *root,
813 struct extent_buffer *buf)
816 * Tree blocks not in reference counted trees and tree roots
817 * are never shared. If a block was allocated after the last
818 * snapshot and the block was not allocated by tree relocation,
819 * we know the block is not shared.
821 if (root->ref_cows &&
822 buf != root->node && buf != root->commit_root &&
823 (btrfs_header_generation(buf) <=
824 btrfs_root_last_snapshot(&root->root_item) ||
825 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
827 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
828 if (root->ref_cows &&
829 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
835 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
836 struct btrfs_root *root,
837 struct extent_buffer *buf,
838 struct extent_buffer *cow,
848 * Backrefs update rules:
850 * Always use full backrefs for extent pointers in tree blocks
851 * allocated by tree relocation.
853 * If a shared tree block is no longer referenced by its owner
854 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
855 * use full backrefs for extent pointers in tree block.
857 * If a tree block is being relocated
858 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
859 * use full backrefs for extent pointers in tree block.
860 * The reason for this is some operations (such as drop tree)
861 * are only allowed for blocks that use full backrefs.
864 if (btrfs_block_can_be_shared(root, buf)) {
865 ret = btrfs_lookup_extent_info(trans, root, buf->start,
866 buf->len, &refs, &flags);
871 btrfs_std_error(root->fs_info, ret);
876 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
877 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
878 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
883 owner = btrfs_header_owner(buf);
884 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
885 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
888 if ((owner == root->root_key.objectid ||
889 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
890 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
891 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
892 BUG_ON(ret); /* -ENOMEM */
894 if (root->root_key.objectid ==
895 BTRFS_TREE_RELOC_OBJECTID) {
896 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
897 BUG_ON(ret); /* -ENOMEM */
898 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
899 BUG_ON(ret); /* -ENOMEM */
901 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
904 if (root->root_key.objectid ==
905 BTRFS_TREE_RELOC_OBJECTID)
906 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
908 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
909 BUG_ON(ret); /* -ENOMEM */
911 if (new_flags != 0) {
912 ret = btrfs_set_disk_extent_flags(trans, root,
920 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
921 if (root->root_key.objectid ==
922 BTRFS_TREE_RELOC_OBJECTID)
923 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
925 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
926 BUG_ON(ret); /* -ENOMEM */
927 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
928 BUG_ON(ret); /* -ENOMEM */
930 tree_mod_log_free_eb(root->fs_info, buf);
931 clean_tree_block(trans, root, buf);
938 * does the dirty work in cow of a single block. The parent block (if
939 * supplied) is updated to point to the new cow copy. The new buffer is marked
940 * dirty and returned locked. If you modify the block it needs to be marked dirty again.
943 * search_start -- an allocation hint for the new block
945 * empty_size -- a hint that you plan on doing more cow. This is the size in
946 * bytes the allocator should try to find free next to the block it returns.
947 * This is just a hint and may be ignored by the allocator.
949 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
950 struct btrfs_root *root,
951 struct extent_buffer *buf,
952 struct extent_buffer *parent, int parent_slot,
953 struct extent_buffer **cow_ret,
954 u64 search_start, u64 empty_size)
956 struct btrfs_disk_key disk_key;
957 struct extent_buffer *cow;
966 btrfs_assert_tree_locked(buf);
968 WARN_ON(root->ref_cows && trans->transid !=
969 root->fs_info->running_transaction->transid);
970 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
972 level = btrfs_header_level(buf);
975 btrfs_item_key(buf, &disk_key, 0);
977 btrfs_node_key(buf, &disk_key, 0);
979 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
981 parent_start = parent->start;
987 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
988 root->root_key.objectid, &disk_key,
989 level, search_start, empty_size);
993 /* cow is set to blocking by btrfs_init_new_buffer */
995 copy_extent_buffer(cow, buf, 0, 0, cow->len);
996 btrfs_set_header_bytenr(cow, cow->start);
997 btrfs_set_header_generation(cow, trans->transid);
998 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
999 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1000 BTRFS_HEADER_FLAG_RELOC);
1001 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1002 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1004 btrfs_set_header_owner(cow, root->root_key.objectid);
1006 write_extent_buffer(cow, root->fs_info->fsid,
1007 (unsigned long)btrfs_header_fsid(cow),
1010 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1012 btrfs_abort_transaction(trans, root, ret);
1017 btrfs_reloc_cow_block(trans, root, buf, cow);
1019 if (buf == root->node) {
1020 WARN_ON(parent && parent != buf);
1021 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1022 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1023 parent_start = buf->start;
1027 extent_buffer_get(cow);
1028 tree_mod_log_set_root_pointer(root, cow);
1029 rcu_assign_pointer(root->node, cow);
1031 btrfs_free_tree_block(trans, root, buf, parent_start,
1033 free_extent_buffer(buf);
1034 add_root_to_dirty_list(root);
1036 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1037 parent_start = parent->start;
1041 WARN_ON(trans->transid != btrfs_header_generation(parent));
1042 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1043 MOD_LOG_KEY_REPLACE);
1044 btrfs_set_node_blockptr(parent, parent_slot,
1046 btrfs_set_node_ptr_generation(parent, parent_slot,
1048 btrfs_mark_buffer_dirty(parent);
1049 btrfs_free_tree_block(trans, root, buf, parent_start,
1053 btrfs_tree_unlock(buf);
1054 free_extent_buffer_stale(buf);
1055 btrfs_mark_buffer_dirty(cow);
1061 * returns the logical address of the oldest predecessor of the given root.
1062 * entries older than time_seq are ignored.
1064 static struct tree_mod_elem *
1065 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1066 struct btrfs_root *root, u64 time_seq)
1068 struct tree_mod_elem *tm;
1069 struct tree_mod_elem *found = NULL;
1070 u64 root_logical = root->node->start;
1077 * the very last operation that's logged for a root is the replacement
1078 * operation (if it is replaced at all). this has the index of the *new*
1079 * root, making it the very first operation that's logged for this root.
1082 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1087 * if there are no tree operations for the oldest root, we simply
1088 * return it. this should only happen if that (old) root is at
1095 * if there's an operation that's not a root replacement, we
1096 * found the oldest version of our root. normally, we'll find a
1097 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1099 if (tm->op != MOD_LOG_ROOT_REPLACE)
1103 root_logical = tm->old_root.logical;
1104 BUG_ON(root_logical == root->node->start);
1108 /* if there's no old root to return, return what we found instead */
1116 * tm is a pointer to the first operation to rewind within eb. then, all
1117 * previous operations will be rewound (until we reach something older than time_seq).
1121 __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
1122 struct tree_mod_elem *first_tm)
1125 struct rb_node *next;
1126 struct tree_mod_elem *tm = first_tm;
1127 unsigned long o_dst;
1128 unsigned long o_src;
1129 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1131 n = btrfs_header_nritems(eb);
1132 while (tm && tm->seq >= time_seq) {
1134 * all the operations are recorded with the operator used for
1135 * the modification. as we're going backwards, we do the
1136 * opposite of each operation here.
1139 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1140 BUG_ON(tm->slot < n);
1141 case MOD_LOG_KEY_REMOVE:
1143 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1144 btrfs_set_node_key(eb, &tm->key, tm->slot);
1145 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1146 btrfs_set_node_ptr_generation(eb, tm->slot,
1149 case MOD_LOG_KEY_REPLACE:
1150 BUG_ON(tm->slot >= n);
1151 btrfs_set_node_key(eb, &tm->key, tm->slot);
1152 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1153 btrfs_set_node_ptr_generation(eb, tm->slot,
1156 case MOD_LOG_KEY_ADD:
1157 /* if a move operation is needed it's in the log */
1160 case MOD_LOG_MOVE_KEYS:
1161 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1162 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1163 memmove_extent_buffer(eb, o_dst, o_src,
1164 tm->move.nr_items * p_size);
1166 case MOD_LOG_ROOT_REPLACE:
1168 * this operation is special. for roots, this must be
1169 * handled explicitly before rewinding.
1170 * for non-roots, this operation may exist if the node
1171 * was a root: root A -> child B; then A gets empty and
1172 * B is promoted to the new root. in the mod log, we'll
1173 * have a root-replace operation for B, a tree block
1174 * that is not a root. we simply ignore that operation.
1178 next = rb_next(&tm->node);
1181 tm = container_of(next, struct tree_mod_elem, node);
1182 if (tm->index != first_tm->index)
1185 btrfs_set_header_nritems(eb, n);
1188 static struct extent_buffer *
1189 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1192 struct extent_buffer *eb_rewin;
1193 struct tree_mod_elem *tm;
1198 if (btrfs_header_level(eb) == 0)
1201 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1205 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1206 BUG_ON(tm->slot != 0);
1207 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1208 fs_info->tree_root->nodesize);
1210 btrfs_set_header_bytenr(eb_rewin, eb->start);
1211 btrfs_set_header_backref_rev(eb_rewin,
1212 btrfs_header_backref_rev(eb));
1213 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1214 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1216 eb_rewin = btrfs_clone_extent_buffer(eb);
1220 extent_buffer_get(eb_rewin);
1221 free_extent_buffer(eb);
1223 __tree_mod_log_rewind(eb_rewin, time_seq, tm);
1224 WARN_ON(btrfs_header_nritems(eb_rewin) >
1225 BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
1231 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1232 * value. If there are no changes, the current root->node is returned. If
1233 * anything changed in between, there's a fresh buffer allocated on which the
1234 * rewind operations are done. In any case, the returned buffer is read locked.
1235 * Returns NULL on error (with no locks held).
1237 static inline struct extent_buffer *
1238 get_old_root(struct btrfs_root *root, u64 time_seq)
1240 struct tree_mod_elem *tm;
1241 struct extent_buffer *eb;
1242 struct extent_buffer *old;
1243 struct tree_mod_root *old_root = NULL;
1244 u64 old_generation = 0;
1248 eb = btrfs_read_lock_root_node(root);
1249 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1253 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1254 old_root = &tm->old_root;
1255 old_generation = tm->generation;
1256 logical = old_root->logical;
1258 logical = root->node->start;
1261 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1262 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1263 btrfs_tree_read_unlock(root->node);
1264 free_extent_buffer(root->node);
1265 blocksize = btrfs_level_size(root, old_root->level);
1266 old = read_tree_block(root, logical, blocksize, 0);
1268 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1272 eb = btrfs_clone_extent_buffer(old);
1273 free_extent_buffer(old);
1275 } else if (old_root) {
1276 btrfs_tree_read_unlock(root->node);
1277 free_extent_buffer(root->node);
1278 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1280 eb = btrfs_clone_extent_buffer(root->node);
1281 btrfs_tree_read_unlock(root->node);
1282 free_extent_buffer(root->node);
1287 extent_buffer_get(eb);
1288 btrfs_tree_read_lock(eb);
1290 btrfs_set_header_bytenr(eb, eb->start);
1291 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1292 btrfs_set_header_owner(eb, root->root_key.objectid);
1293 btrfs_set_header_level(eb, old_root->level);
1294 btrfs_set_header_generation(eb, old_generation);
1297 __tree_mod_log_rewind(eb, time_seq, tm);
1299 WARN_ON(btrfs_header_level(eb) != 0);
1300 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
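/*
 * A minimal sketch of how the rewind machinery above is meant to be driven
 * (assuming a caller along the lines of btrfs_search_old_slot(), which is
 * not shown in this excerpt):
 *
 *	eb = get_old_root(root, time_seq);       rewound, read-locked root
 *	level = btrfs_header_level(eb);
 *	while (level > 0) {
 *		... bin_search() for the key, read the child block ...
 *		eb = tree_mod_log_rewind(fs_info, child, time_seq);
 *		level--;
 *	}
 */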
1305 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1307 struct tree_mod_elem *tm;
1310 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1311 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1312 level = tm->old_root.level;
1315 level = btrfs_header_level(root->node);
1322 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1323 struct btrfs_root *root,
1324 struct extent_buffer *buf)
1326 /* ensure we can see the force_cow */
1330 * We do not need to cow a block if
1331 * 1) this block is not created or changed in this transaction;
1332 * 2) this block does not belong to TREE_RELOC tree;
1333 * 3) the root is not forced COW.
1335 * What is forced COW:
1336 * when we create a snapshot during committing the transaction,
1337 * after we've finished copying the src root, we must COW the shared
1338 * block to ensure the metadata consistency.
1340 if (btrfs_header_generation(buf) == trans->transid &&
1341 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1342 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1343 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1350 * cows a single block, see __btrfs_cow_block for the real work.
1351 * This version of it has extra checks so that a block isn't cow'd more than
1352 * once per transaction, as long as it hasn't been written yet
1354 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1355 struct btrfs_root *root, struct extent_buffer *buf,
1356 struct extent_buffer *parent, int parent_slot,
1357 struct extent_buffer **cow_ret)
1362 if (trans->transaction != root->fs_info->running_transaction)
1363 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1364 (unsigned long long)trans->transid,
1365 (unsigned long long)
1366 root->fs_info->running_transaction->transid);
1368 if (trans->transid != root->fs_info->generation)
1369 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1370 (unsigned long long)trans->transid,
1371 (unsigned long long)root->fs_info->generation);
1373 if (!should_cow_block(trans, root, buf)) {
1378 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1381 btrfs_set_lock_blocking(parent);
1382 btrfs_set_lock_blocking(buf);
1384 ret = __btrfs_cow_block(trans, root, buf, parent,
1385 parent_slot, cow_ret, search_start, 0);
1387 trace_btrfs_cow_block(root, buf, *cow_ret);
1393 * helper function for defrag to decide if two blocks pointed to by a
1394 * node are actually close by
1396 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1398 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1400 if (blocknr > other && blocknr - (other + blocksize) < 32768)
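/*
 * Worked example for close_blocks() above: with a 16K blocksize, blocks at
 * byte offsets 0 and 40960 count as close (40960 - 16384 = 24576, which is
 * below the 32768 threshold), while blocks at 0 and 65536 do not
 * (65536 - 16384 = 49152).
 */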
1406 * compare two keys in a memcmp fashion
1408 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1410 struct btrfs_key k1;
1412 btrfs_disk_key_to_cpu(&k1, disk);
1414 return btrfs_comp_cpu_keys(&k1, k2);
1418 * same as comp_keys only with two btrfs_key's
1420 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1422 if (k1->objectid > k2->objectid)
1424 if (k1->objectid < k2->objectid)
1426 if (k1->type > k2->type)
1428 if (k1->type < k2->type)
1430 if (k1->offset > k2->offset)
1432 if (k1->offset < k2->offset)
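/*
 * Example of the ordering implemented above: keys compare by objectid
 * first, then type, then offset. So (256, 1, 0) sorts before (256, 84, 123),
 * and both sort before anything with objectid 257, whatever its type and
 * offset are.
 */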
1438 * this is used by the defrag code to go through all the
1439 * leaves pointed to by a node and reallocate them so that
1440 * disk order is close to key order
1442 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1443 struct btrfs_root *root, struct extent_buffer *parent,
1444 int start_slot, int cache_only, u64 *last_ret,
1445 struct btrfs_key *progress)
1447 struct extent_buffer *cur;
1450 u64 search_start = *last_ret;
1460 int progress_passed = 0;
1461 struct btrfs_disk_key disk_key;
1463 parent_level = btrfs_header_level(parent);
1464 if (cache_only && parent_level != 1)
1467 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1468 WARN_ON(trans->transid != root->fs_info->generation);
1470 parent_nritems = btrfs_header_nritems(parent);
1471 blocksize = btrfs_level_size(root, parent_level - 1);
1472 end_slot = parent_nritems;
1474 if (parent_nritems == 1)
1477 btrfs_set_lock_blocking(parent);
1479 for (i = start_slot; i < end_slot; i++) {
1482 btrfs_node_key(parent, &disk_key, i);
1483 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1486 progress_passed = 1;
1487 blocknr = btrfs_node_blockptr(parent, i);
1488 gen = btrfs_node_ptr_generation(parent, i);
1489 if (last_block == 0)
1490 last_block = blocknr;
1493 other = btrfs_node_blockptr(parent, i - 1);
1494 close = close_blocks(blocknr, other, blocksize);
1496 if (!close && i < end_slot - 2) {
1497 other = btrfs_node_blockptr(parent, i + 1);
1498 close = close_blocks(blocknr, other, blocksize);
1501 last_block = blocknr;
1505 cur = btrfs_find_tree_block(root, blocknr, blocksize);
1507 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1510 if (!cur || !uptodate) {
1512 free_extent_buffer(cur);
1516 cur = read_tree_block(root, blocknr,
1520 } else if (!uptodate) {
1521 err = btrfs_read_buffer(cur, gen);
1523 free_extent_buffer(cur);
1528 if (search_start == 0)
1529 search_start = last_block;
1531 btrfs_tree_lock(cur);
1532 btrfs_set_lock_blocking(cur);
1533 err = __btrfs_cow_block(trans, root, cur, parent, i,
1536 (end_slot - i) * blocksize));
1538 btrfs_tree_unlock(cur);
1539 free_extent_buffer(cur);
1542 search_start = cur->start;
1543 last_block = cur->start;
1544 *last_ret = search_start;
1545 btrfs_tree_unlock(cur);
1546 free_extent_buffer(cur);
1552 * The leaf data grows from end-to-front in the node.
1553 * this returns the address of the start of the last item,
1554 * which is the stop of the leaf data stack
1556 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1557 struct extent_buffer *leaf)
1559 u32 nr = btrfs_header_nritems(leaf);
1561 return BTRFS_LEAF_DATA_SIZE(root);
1562 return btrfs_item_offset_nr(leaf, nr - 1);
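/*
 * Worked example for leaf_data_end() above: in an empty leaf the data end
 * is BTRFS_LEAF_DATA_SIZE(root); after an item whose data takes 100 bytes
 * is inserted, the last item's data starts 100 bytes lower, so the helper
 * returns BTRFS_LEAF_DATA_SIZE(root) - 100.
 */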
1567 * search for key in the extent_buffer. The items start at offset p,
1568 * and they are item_size apart. There are 'max' items in p.
1570 * the slot in the array is returned via slot, and it points to
1571 * the place where you would insert key if it is not found in
1574 * slot may point to max if the key is bigger than all of the keys
1576 static noinline int generic_bin_search(struct extent_buffer *eb,
1578 int item_size, struct btrfs_key *key,
1585 struct btrfs_disk_key *tmp = NULL;
1586 struct btrfs_disk_key unaligned;
1587 unsigned long offset;
1589 unsigned long map_start = 0;
1590 unsigned long map_len = 0;
1593 while (low < high) {
1594 mid = (low + high) / 2;
1595 offset = p + mid * item_size;
1597 if (!kaddr || offset < map_start ||
1598 (offset + sizeof(struct btrfs_disk_key)) >
1599 map_start + map_len) {
1601 err = map_private_extent_buffer(eb, offset,
1602 sizeof(struct btrfs_disk_key),
1603 &kaddr, &map_start, &map_len);
1606 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1609 read_extent_buffer(eb, &unaligned,
1610 offset, sizeof(unaligned));
1615 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1618 ret = comp_keys(tmp, key);
1634 * simple bin_search frontend that does the right thing for
1637 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1638 int level, int *slot)
1641 return generic_bin_search(eb,
1642 offsetof(struct btrfs_leaf, items),
1643 sizeof(struct btrfs_item),
1644 key, btrfs_header_nritems(eb),
1647 return generic_bin_search(eb,
1648 offsetof(struct btrfs_node, ptrs),
1649 sizeof(struct btrfs_key_ptr),
1650 key, btrfs_header_nritems(eb),
1654 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1655 int level, int *slot)
1657 return bin_search(eb, key, level, slot);
1660 static void root_add_used(struct btrfs_root *root, u32 size)
1662 spin_lock(&root->accounting_lock);
1663 btrfs_set_root_used(&root->root_item,
1664 btrfs_root_used(&root->root_item) + size);
1665 spin_unlock(&root->accounting_lock);
1668 static void root_sub_used(struct btrfs_root *root, u32 size)
1670 spin_lock(&root->accounting_lock);
1671 btrfs_set_root_used(&root->root_item,
1672 btrfs_root_used(&root->root_item) - size);
1673 spin_unlock(&root->accounting_lock);
1676 /* given a node and slot number, this reads the block it points to. The
1677 * extent buffer is returned with a reference taken (but unlocked).
1678 * NULL is returned on error.
1680 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1681 struct extent_buffer *parent, int slot)
1683 int level = btrfs_header_level(parent);
1686 if (slot >= btrfs_header_nritems(parent))
1691 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
1692 btrfs_level_size(root, level - 1),
1693 btrfs_node_ptr_generation(parent, slot));
1697 * node level balancing, used to make sure nodes are in proper order for
1698 * item deletion. We balance from the top down, so we have to make sure
1699 * that a deletion won't leave a node completely empty later on.
1701 static noinline int balance_level(struct btrfs_trans_handle *trans,
1702 struct btrfs_root *root,
1703 struct btrfs_path *path, int level)
1705 struct extent_buffer *right = NULL;
1706 struct extent_buffer *mid;
1707 struct extent_buffer *left = NULL;
1708 struct extent_buffer *parent = NULL;
1712 int orig_slot = path->slots[level];
1718 mid = path->nodes[level];
1720 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1721 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1722 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1724 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1726 if (level < BTRFS_MAX_LEVEL - 1) {
1727 parent = path->nodes[level + 1];
1728 pslot = path->slots[level + 1];
1732 * deal with the case where there is only one pointer in the root
1733 * by promoting the node below to a root
1736 struct extent_buffer *child;
1738 if (btrfs_header_nritems(mid) != 1)
1741 /* promote the child to a root */
1742 child = read_node_slot(root, mid, 0);
1745 btrfs_std_error(root->fs_info, ret);
1749 btrfs_tree_lock(child);
1750 btrfs_set_lock_blocking(child);
1751 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1753 btrfs_tree_unlock(child);
1754 free_extent_buffer(child);
1758 tree_mod_log_free_eb(root->fs_info, root->node);
1759 tree_mod_log_set_root_pointer(root, child);
1760 rcu_assign_pointer(root->node, child);
1762 add_root_to_dirty_list(root);
1763 btrfs_tree_unlock(child);
1765 path->locks[level] = 0;
1766 path->nodes[level] = NULL;
1767 clean_tree_block(trans, root, mid);
1768 btrfs_tree_unlock(mid);
1769 /* once for the path */
1770 free_extent_buffer(mid);
1772 root_sub_used(root, mid->len);
1773 btrfs_free_tree_block(trans, root, mid, 0, 1);
1774 /* once for the root ptr */
1775 free_extent_buffer_stale(mid);
1778 if (btrfs_header_nritems(mid) >
1779 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1782 left = read_node_slot(root, parent, pslot - 1);
1784 btrfs_tree_lock(left);
1785 btrfs_set_lock_blocking(left);
1786 wret = btrfs_cow_block(trans, root, left,
1787 parent, pslot - 1, &left);
1793 right = read_node_slot(root, parent, pslot + 1);
1795 btrfs_tree_lock(right);
1796 btrfs_set_lock_blocking(right);
1797 wret = btrfs_cow_block(trans, root, right,
1798 parent, pslot + 1, &right);
1805 /* first, try to make some room in the middle buffer */
1807 orig_slot += btrfs_header_nritems(left);
1808 wret = push_node_left(trans, root, left, mid, 1);
1814 * then try to empty the right most buffer into the middle
1817 wret = push_node_left(trans, root, mid, right, 1);
1818 if (wret < 0 && wret != -ENOSPC)
1820 if (btrfs_header_nritems(right) == 0) {
1821 clean_tree_block(trans, root, right);
1822 btrfs_tree_unlock(right);
1823 del_ptr(trans, root, path, level + 1, pslot + 1);
1824 root_sub_used(root, right->len);
1825 btrfs_free_tree_block(trans, root, right, 0, 1);
1826 free_extent_buffer_stale(right);
1829 struct btrfs_disk_key right_key;
1830 btrfs_node_key(right, &right_key, 0);
1831 tree_mod_log_set_node_key(root->fs_info, parent,
1833 btrfs_set_node_key(parent, &right_key, pslot + 1);
1834 btrfs_mark_buffer_dirty(parent);
1837 if (btrfs_header_nritems(mid) == 1) {
1839 * we're not allowed to leave a node with one item in the
1840 * tree during a delete. A deletion from lower in the tree
1841 * could try to delete the only pointer in this node.
1842 * So, pull some keys from the left.
1843 * There has to be a left pointer at this point because
1844 * otherwise we would have pulled some pointers from the
1849 btrfs_std_error(root->fs_info, ret);
1852 wret = balance_node_right(trans, root, mid, left);
1858 wret = push_node_left(trans, root, left, mid, 1);
1864 if (btrfs_header_nritems(mid) == 0) {
1865 clean_tree_block(trans, root, mid);
1866 btrfs_tree_unlock(mid);
1867 del_ptr(trans, root, path, level + 1, pslot);
1868 root_sub_used(root, mid->len);
1869 btrfs_free_tree_block(trans, root, mid, 0, 1);
1870 free_extent_buffer_stale(mid);
1873 /* update the parent key to reflect our changes */
1874 struct btrfs_disk_key mid_key;
1875 btrfs_node_key(mid, &mid_key, 0);
1876 tree_mod_log_set_node_key(root->fs_info, parent,
1878 btrfs_set_node_key(parent, &mid_key, pslot);
1879 btrfs_mark_buffer_dirty(parent);
1882 /* update the path */
1884 if (btrfs_header_nritems(left) > orig_slot) {
1885 extent_buffer_get(left);
1886 /* left was locked after cow */
1887 path->nodes[level] = left;
1888 path->slots[level + 1] -= 1;
1889 path->slots[level] = orig_slot;
1891 btrfs_tree_unlock(mid);
1892 free_extent_buffer(mid);
1895 orig_slot -= btrfs_header_nritems(left);
1896 path->slots[level] = orig_slot;
1899 /* double check we haven't messed things up */
1901 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1905 btrfs_tree_unlock(right);
1906 free_extent_buffer(right);
1909 if (path->nodes[level] != left)
1910 btrfs_tree_unlock(left);
1911 free_extent_buffer(left);
1916 /* Node balancing for insertion. Here we only split or push nodes around
1917 * when they are completely full. This is also done top down, so we
1918 * have to be pessimistic.
1920 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1921 struct btrfs_root *root,
1922 struct btrfs_path *path, int level)
1924 struct extent_buffer *right = NULL;
1925 struct extent_buffer *mid;
1926 struct extent_buffer *left = NULL;
1927 struct extent_buffer *parent = NULL;
1931 int orig_slot = path->slots[level];
1936 mid = path->nodes[level];
1937 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1939 if (level < BTRFS_MAX_LEVEL - 1) {
1940 parent = path->nodes[level + 1];
1941 pslot = path->slots[level + 1];
1947 left = read_node_slot(root, parent, pslot - 1);
1949 /* first, try to make some room in the middle buffer */
1953 btrfs_tree_lock(left);
1954 btrfs_set_lock_blocking(left);
1956 left_nr = btrfs_header_nritems(left);
1957 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1960 ret = btrfs_cow_block(trans, root, left, parent,
1965 wret = push_node_left(trans, root,
1972 struct btrfs_disk_key disk_key;
1973 orig_slot += left_nr;
1974 btrfs_node_key(mid, &disk_key, 0);
1975 tree_mod_log_set_node_key(root->fs_info, parent,
1977 btrfs_set_node_key(parent, &disk_key, pslot);
1978 btrfs_mark_buffer_dirty(parent);
1979 if (btrfs_header_nritems(left) > orig_slot) {
1980 path->nodes[level] = left;
1981 path->slots[level + 1] -= 1;
1982 path->slots[level] = orig_slot;
1983 btrfs_tree_unlock(mid);
1984 free_extent_buffer(mid);
1987 btrfs_header_nritems(left);
1988 path->slots[level] = orig_slot;
1989 btrfs_tree_unlock(left);
1990 free_extent_buffer(left);
1994 btrfs_tree_unlock(left);
1995 free_extent_buffer(left);
1997 right = read_node_slot(root, parent, pslot + 1);
2000 * then try to empty the right most buffer into the middle
2005 btrfs_tree_lock(right);
2006 btrfs_set_lock_blocking(right);
2008 right_nr = btrfs_header_nritems(right);
2009 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2012 ret = btrfs_cow_block(trans, root, right,
2018 wret = balance_node_right(trans, root,
2025 struct btrfs_disk_key disk_key;
2027 btrfs_node_key(right, &disk_key, 0);
2028 tree_mod_log_set_node_key(root->fs_info, parent,
2030 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2031 btrfs_mark_buffer_dirty(parent);
2033 if (btrfs_header_nritems(mid) <= orig_slot) {
2034 path->nodes[level] = right;
2035 path->slots[level + 1] += 1;
2036 path->slots[level] = orig_slot -
2037 btrfs_header_nritems(mid);
2038 btrfs_tree_unlock(mid);
2039 free_extent_buffer(mid);
2041 btrfs_tree_unlock(right);
2042 free_extent_buffer(right);
2046 btrfs_tree_unlock(right);
2047 free_extent_buffer(right);
2053 * readahead one full node of leaves, finding things that are close
2054 * to the block in 'slot', and triggering ra on them.
2056 static void reada_for_search(struct btrfs_root *root,
2057 struct btrfs_path *path,
2058 int level, int slot, u64 objectid)
2060 struct extent_buffer *node;
2061 struct btrfs_disk_key disk_key;
2067 int direction = path->reada;
2068 struct extent_buffer *eb;
2076 if (!path->nodes[level])
2079 node = path->nodes[level];
2081 search = btrfs_node_blockptr(node, slot);
2082 blocksize = btrfs_level_size(root, level - 1);
2083 eb = btrfs_find_tree_block(root, search, blocksize);
2085 free_extent_buffer(eb);
2091 nritems = btrfs_header_nritems(node);
2095 if (direction < 0) {
2099 } else if (direction > 0) {
2104 if (path->reada < 0 && objectid) {
2105 btrfs_node_key(node, &disk_key, nr);
2106 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2109 search = btrfs_node_blockptr(node, nr);
2110 if ((search <= target && target - search <= 65536) ||
2111 (search > target && search - target <= 65536)) {
2112 gen = btrfs_node_ptr_generation(node, nr);
2113 readahead_tree_block(root, search, blocksize, gen);
2117 if ((nread > 65536 || nscan > 32))
2123 * returns -EAGAIN if it had to drop the path, or zero if everything was in
2126 static noinline int reada_for_balance(struct btrfs_root *root,
2127 struct btrfs_path *path, int level)
2131 struct extent_buffer *parent;
2132 struct extent_buffer *eb;
2139 parent = path->nodes[level + 1];
2143 nritems = btrfs_header_nritems(parent);
2144 slot = path->slots[level + 1];
2145 blocksize = btrfs_level_size(root, level);
2148 block1 = btrfs_node_blockptr(parent, slot - 1);
2149 gen = btrfs_node_ptr_generation(parent, slot - 1);
2150 eb = btrfs_find_tree_block(root, block1, blocksize);
2152 * if we get -eagain from btrfs_buffer_uptodate, we
2153 * don't want to return eagain here. That will loop
2156 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2158 free_extent_buffer(eb);
2160 if (slot + 1 < nritems) {
2161 block2 = btrfs_node_blockptr(parent, slot + 1);
2162 gen = btrfs_node_ptr_generation(parent, slot + 1);
2163 eb = btrfs_find_tree_block(root, block2, blocksize);
2164 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2166 free_extent_buffer(eb);
2168 if (block1 || block2) {
2171 /* release the whole path */
2172 btrfs_release_path(path);
2174 /* read the blocks */
2176 readahead_tree_block(root, block1, blocksize, 0);
2178 readahead_tree_block(root, block2, blocksize, 0);
2181 eb = read_tree_block(root, block1, blocksize, 0);
2182 free_extent_buffer(eb);
2185 eb = read_tree_block(root, block2, blocksize, 0);
2186 free_extent_buffer(eb);
2194 * when we walk down the tree, it is usually safe to unlock the higher layers
2195 * in the tree. The exceptions are when our path goes through slot 0, because
2196 * operations on the tree might require changing key pointers higher up in the
2199 * callers might also have set path->keep_locks, which tells this code to keep
2200 * the lock if the path points to the last slot in the block. This is part of
2201 * walking through the tree, and selecting the next slot in the higher block.
2203 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2204 * if lowest_unlock is 1, level 0 won't be unlocked
2206 static noinline void unlock_up(struct btrfs_path *path, int level,
2207 int lowest_unlock, int min_write_lock_level,
2208 int *write_lock_level)
2211 int skip_level = level;
2213 struct extent_buffer *t;
2215 if (path->really_keep_locks)
2218 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2219 if (!path->nodes[i])
2221 if (!path->locks[i])
2223 if (!no_skips && path->slots[i] == 0) {
2227 if (!no_skips && path->keep_locks) {
2230 nritems = btrfs_header_nritems(t);
2231 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2236 if (skip_level < i && i >= lowest_unlock)
2240 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2241 btrfs_tree_unlock_rw(t, path->locks[i]);
2243 if (write_lock_level &&
2244 i > min_write_lock_level &&
2245 i <= *write_lock_level) {
2246 *write_lock_level = i - 1;
2253 * This releases any locks held in the path starting at level and
2254 * going all the way up to the root.
2256 * btrfs_search_slot will keep the lock held on higher nodes in a few
2257 * corner cases, such as COW of the block at slot zero in the node. This
2258 * ignores those rules, and it should only be called when there are no
2259 * more updates to be done higher up in the tree.
2261 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2265 if (path->keep_locks || path->really_keep_locks)
2268 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2269 if (!path->nodes[i])
2271 if (!path->locks[i])
2273 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2279 * helper function for btrfs_search_slot. The goal is to find a block
2280 * in cache without setting the path to blocking. If we find the block
2281 * we return zero and the path is unchanged.
2283 * If we can't find the block, we set the path blocking and do some
2284 * reada. -EAGAIN is returned and the search must be repeated.
2287 read_block_for_search(struct btrfs_trans_handle *trans,
2288 struct btrfs_root *root, struct btrfs_path *p,
2289 struct extent_buffer **eb_ret, int level, int slot,
2290 struct btrfs_key *key, u64 time_seq)
2295 struct extent_buffer *b = *eb_ret;
2296 struct extent_buffer *tmp;
2299 blocknr = btrfs_node_blockptr(b, slot);
2300 gen = btrfs_node_ptr_generation(b, slot);
2301 blocksize = btrfs_level_size(root, level - 1);
2303 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2305 /* first we do an atomic uptodate check */
2306 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2307 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2309 * we found an up to date block without
2316 /* the pages were up to date, but we failed
2317 * the generation number check. Do a full
2318 * read for the generation number that is correct.
2319 * We must do this without dropping locks so
2320 * we can trust our generation number
2322 free_extent_buffer(tmp);
2323 btrfs_set_path_blocking(p);
2325 /* now we're allowed to do a blocking uptodate check */
2326 tmp = read_tree_block(root, blocknr, blocksize, gen);
2327 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
2331 free_extent_buffer(tmp);
2332 btrfs_release_path(p);
2338 * reduce lock contention at high levels
2339 * of the btree by dropping locks before
2340 * we read. Don't release the lock on the current
2341 * level because we need to walk this node to figure
2342 * out which blocks to read.
2344 btrfs_unlock_up_safe(p, level + 1);
2345 btrfs_set_path_blocking(p);
2347 free_extent_buffer(tmp);
2349 reada_for_search(root, p, level, slot, key->objectid);
2351 btrfs_release_path(p);
2354 tmp = read_tree_block(root, blocknr, blocksize, 0);
2357 * If the read above didn't mark this buffer up to date,
2358 * it will never end up being up to date. Set ret to EIO now
2359 * and give up so that our caller doesn't loop forever
2362 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2364 free_extent_buffer(tmp);
2370 * helper function for btrfs_search_slot. This does all of the checks
2371 * for node-level blocks and does any balancing required based on
2374 * If no extra work was required, zero is returned. If we had to
2375 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2379 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2380 struct btrfs_root *root, struct btrfs_path *p,
2381 struct extent_buffer *b, int level, int ins_len,
2382 int *write_lock_level)
2385 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2386 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2389 if (*write_lock_level < level + 1) {
2390 *write_lock_level = level + 1;
2391 btrfs_release_path(p);
2395 sret = reada_for_balance(root, p, level);
2399 btrfs_set_path_blocking(p);
2400 sret = split_node(trans, root, p, level);
2401 btrfs_clear_path_blocking(p, NULL, 0);
2408 b = p->nodes[level];
2409 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2410 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2413 if (*write_lock_level < level + 1) {
2414 *write_lock_level = level + 1;
2415 btrfs_release_path(p);
2419 sret = reada_for_balance(root, p, level);
2423 btrfs_set_path_blocking(p);
2424 sret = balance_level(trans, root, p, level);
2425 btrfs_clear_path_blocking(p, NULL, 0);
2431 b = p->nodes[level];
2433 btrfs_release_path(p);
2436 BUG_ON(btrfs_header_nritems(b) == 1);
2447 * look for key in the tree. path is filled in with nodes along the way
2448 * if key is found, we return zero and you can find the item in the leaf
2449 * level of the path (level 0)
2451 * If the key isn't found, the path points to the slot where it should
2452 * be inserted, and 1 is returned. If there are other errors during the
2453 * search a negative error number is returned.
2455 * if ins_len > 0, nodes and leaves will be split as we walk down the
2456 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2459 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2460 *root, struct btrfs_key *key, struct btrfs_path *p, int
2463 struct extent_buffer *b;
2468 int lowest_unlock = 1;
2470 /* everything at write_lock_level or lower must be write locked */
2471 int write_lock_level = 0;
2472 u8 lowest_level = 0;
2473 int min_write_lock_level;
2475 lowest_level = p->lowest_level;
2476 WARN_ON(lowest_level && ins_len > 0);
2477 WARN_ON(p->nodes[0] != NULL);
2482 /* when we are removing items, we might have to go up to level
2483 * two as we update tree pointers. Make sure we keep write
2484 * locks on those levels as well
2486 write_lock_level = 2;
2487 } else if (ins_len > 0) {
2489 * for inserting items, make sure we have a write lock on
2490 * level 1 so we can update keys
2492 write_lock_level = 1;
2496 write_lock_level = -1;
2498 if (cow && (p->really_keep_locks || p->keep_locks || p->lowest_level))
2499 write_lock_level = BTRFS_MAX_LEVEL;
2501 min_write_lock_level = write_lock_level;
2505 * we try very hard to do read locks on the root
2507 root_lock = BTRFS_READ_LOCK;
2509 if (p->search_commit_root) {
2511 * the commit roots are read only
2512 * so we always do read locks
2514 b = root->commit_root;
2515 extent_buffer_get(b);
2516 level = btrfs_header_level(b);
2517 if (!p->skip_locking)
2518 btrfs_tree_read_lock(b);
2520 if (p->skip_locking) {
2521 b = btrfs_root_node(root);
2522 level = btrfs_header_level(b);
2524 /* we don't know the level of the root node
2525 * until we actually have it read locked
2527 b = btrfs_read_lock_root_node(root);
2528 level = btrfs_header_level(b);
2529 if (level <= write_lock_level) {
2530 /* whoops, must trade for write lock */
2531 btrfs_tree_read_unlock(b);
2532 free_extent_buffer(b);
2533 b = btrfs_lock_root_node(root);
2534 root_lock = BTRFS_WRITE_LOCK;
2536 /* the level might have changed, check again */
2537 level = btrfs_header_level(b);
2541 p->nodes[level] = b;
2542 if (!p->skip_locking)
2543 p->locks[level] = root_lock;
2546 level = btrfs_header_level(b);
2549 * setup the path here so we can release it under lock
2550 * contention with the cow code
2554 * if we don't really need to cow this block
2555 * then we don't want to set the path blocking,
2556 * so we test it here
2558 if (!should_cow_block(trans, root, b))
2561 btrfs_set_path_blocking(p);
2564 * must have write locks on this node and the
2567 if (level + 1 > write_lock_level) {
2568 write_lock_level = level + 1;
2569 btrfs_release_path(p);
2573 err = btrfs_cow_block(trans, root, b,
2574 p->nodes[level + 1],
2575 p->slots[level + 1], &b);
2582 BUG_ON(!cow && ins_len);
2584 p->nodes[level] = b;
2585 btrfs_clear_path_blocking(p, NULL, 0);
2588 * we have a lock on b and as long as we aren't changing
2589 * the tree, there is no way for the items in b to change.
2590 * It is safe to drop the lock on our parent before we
2591 * go through the expensive btree search on b.
2593 * If cow is true, then we might be changing slot zero,
2594 * which may require changing the parent. So, we can't
2595 * drop the lock until after we know which slot we're
2599 btrfs_unlock_up_safe(p, level + 1);
2601 ret = bin_search(b, key, level, &slot);
2605 if (ret && slot > 0) {
2609 p->slots[level] = slot;
2610 err = setup_nodes_for_search(trans, root, p, b, level,
2611 ins_len, &write_lock_level);
2618 b = p->nodes[level];
2619 slot = p->slots[level];
2622 * slot 0 is special, if we change the key
2623 * we have to update the parent pointer
2624 * which means we must have a write lock
2627 if (slot == 0 && cow &&
2628 write_lock_level < level + 1) {
2629 write_lock_level = level + 1;
2630 btrfs_release_path(p);
2634 unlock_up(p, level, lowest_unlock,
2635 min_write_lock_level, &write_lock_level);
2637 if (level == lowest_level) {
2643 err = read_block_for_search(trans, root, p,
2644 &b, level, slot, key, 0);
2652 if (!p->skip_locking) {
2653 level = btrfs_header_level(b);
2654 if (level <= write_lock_level) {
2655 err = btrfs_try_tree_write_lock(b);
2657 btrfs_set_path_blocking(p);
2659 btrfs_clear_path_blocking(p, b,
2662 p->locks[level] = BTRFS_WRITE_LOCK;
2664 err = btrfs_try_tree_read_lock(b);
2666 btrfs_set_path_blocking(p);
2667 btrfs_tree_read_lock(b);
2668 btrfs_clear_path_blocking(p, b,
2671 p->locks[level] = BTRFS_READ_LOCK;
2673 p->nodes[level] = b;
2676 p->slots[level] = slot;
2678 btrfs_leaf_free_space(root, b) < ins_len) {
2679 if (write_lock_level < 1) {
2680 write_lock_level = 1;
2681 btrfs_release_path(p);
2685 btrfs_set_path_blocking(p);
2686 err = split_leaf(trans, root, key,
2687 p, ins_len, ret == 0);
2688 btrfs_clear_path_blocking(p, NULL, 0);
2696 if (!p->search_for_split)
2697 unlock_up(p, level, lowest_unlock,
2698 min_write_lock_level, &write_lock_level);
2705 * we don't really know what they plan on doing with the path
2706 * from here on, so for now just mark it as blocking
2708 if (!p->leave_spinning)
2709 btrfs_set_path_blocking(p);
2711 btrfs_release_path(p);
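
/*
 * Illustrative sketch of the usual calling convention for btrfs_search_slot()
 * above: a plain read-only lookup. The helper name and the -ENOENT mapping
 * are hypothetical; only the alloc/search/free pattern is taken from in-tree
 * callers.
 */
static int example_lookup_item(struct btrfs_root *root, u64 objectid,
			       u8 type, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/* ins_len == 0 and cow == 0: pure lookup, read locks only */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* exact match: path->nodes[0], path->slots[0] hold the item */
	} else if (ret == 1) {
		/* not found: the slot is where the key would be inserted */
		ret = -ENOENT;
	}

	btrfs_free_path(path);
	return ret;
}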
2716 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2717 * current state of the tree together with the operations recorded in the tree
2718 * modification log to search for the key in a previous version of this tree, as
2719 * denoted by the time_seq parameter.
2721 * Naturally, there is no support for insert, delete or cow operations.
2723 * The resulting path and return value will be set up as if we called
2724 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2726 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2727 struct btrfs_path *p, u64 time_seq)
2729 struct extent_buffer *b;
2734 int lowest_unlock = 1;
2735 u8 lowest_level = 0;
2737 lowest_level = p->lowest_level;
2738 WARN_ON(p->nodes[0] != NULL);
2740 if (p->search_commit_root) {
2742 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2746 b = get_old_root(root, time_seq);
2747 level = btrfs_header_level(b);
2748 p->locks[level] = BTRFS_READ_LOCK;
2751 level = btrfs_header_level(b);
2752 p->nodes[level] = b;
2753 btrfs_clear_path_blocking(p, NULL, 0);
2756 * we have a lock on b and as long as we aren't changing
2757 * the tree, there is no way for the items in b to change.
2758 * It is safe to drop the lock on our parent before we
2759 * go through the expensive btree search on b.
2761 btrfs_unlock_up_safe(p, level + 1);
2763 ret = bin_search(b, key, level, &slot);
2767 if (ret && slot > 0) {
2771 p->slots[level] = slot;
2772 unlock_up(p, level, lowest_unlock, 0, NULL);
2774 if (level == lowest_level) {
2780 err = read_block_for_search(NULL, root, p, &b, level,
2781 slot, key, time_seq);
2789 level = btrfs_header_level(b);
2790 err = btrfs_try_tree_read_lock(b);
2792 btrfs_set_path_blocking(p);
2793 btrfs_tree_read_lock(b);
2794 btrfs_clear_path_blocking(p, b,
2797 p->locks[level] = BTRFS_READ_LOCK;
2798 p->nodes[level] = b;
2799 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2800 if (b != p->nodes[level]) {
2801 btrfs_tree_unlock_rw(p->nodes[level],
2803 p->locks[level] = 0;
2804 p->nodes[level] = b;
2807 p->slots[level] = slot;
2808 unlock_up(p, level, lowest_unlock, 0, NULL);
2814 if (!p->leave_spinning)
2815 btrfs_set_path_blocking(p);
2817 btrfs_release_path(p);
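
/*
 * Illustrative sketch of a lookup against an old tree version via
 * btrfs_search_old_slot() above. It assumes the caller already holds a tree
 * mod log sequence number in time_seq; the helper name is hypothetical.
 */
static int example_old_lookup(struct btrfs_root *root, struct btrfs_key *key,
			      u64 time_seq)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* no transaction handle: old versions are strictly read only */
	ret = btrfs_search_old_slot(root, key, path, time_seq);

	btrfs_free_path(path);
	return ret;
}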
2823 * helper to use instead of search slot if no exact match is needed but
2824 * instead the next or previous item should be returned.
2825 * When find_higher is true, the next higher item is returned, the next lower otherwise.
2827 * When return_any and find_higher are both true, and no higher item is found,
2828 * return the next lower instead.
2829 * When return_any is true and find_higher is false, and no lower item is found,
2830 * return the next higher instead.
2831 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 on error.
2834 int btrfs_search_slot_for_read(struct btrfs_root *root,
2835 struct btrfs_key *key, struct btrfs_path *p,
2836 int find_higher, int return_any)
2839 struct extent_buffer *leaf;
2842 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2846 * a return value of 1 means the path is at the position where the
2847 * item should be inserted. Normally this is the next bigger item,
2848 * but in case the previous item is the last in a leaf, path points
2849 * to the first free slot in the previous leaf, i.e. at an invalid
2855 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2856 ret = btrfs_next_leaf(root, p);
2862 * no higher item found, return the next
2867 btrfs_release_path(p);
2871 if (p->slots[0] == 0) {
2872 ret = btrfs_prev_leaf(root, p);
2876 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2882 * no lower item found, return the next
2887 btrfs_release_path(p);
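
/*
 * Illustrative sketch for btrfs_search_slot_for_read() above: fetch the key
 * of the first item at or after search_key. The helper name is hypothetical;
 * the return value handling follows the comment above (0 = item found,
 * 1 = tree empty, < 0 = error).
 */
static int example_find_next_item(struct btrfs_root *root,
				  struct btrfs_key *search_key,
				  struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* find_higher = 1: want the next higher item; return_any = 0 */
	ret = btrfs_search_slot_for_read(root, search_key, path, 1, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);

	btrfs_free_path(path);
	return ret;
}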
2897 * adjust the pointers going up the tree, starting at 'level',
2898 * making sure the right key of each node points to 'key'.
2899 * This is used after shifting pointers to the left, so it stops
2900 * fixing up pointers when a given leaf/node is not in slot 0 of the
2904 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2905 struct btrfs_root *root, struct btrfs_path *path,
2906 struct btrfs_disk_key *key, int level)
2909 struct extent_buffer *t;
2911 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2912 int tslot = path->slots[i];
2913 if (!path->nodes[i])
2916 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2917 btrfs_set_node_key(t, key, tslot);
2918 btrfs_mark_buffer_dirty(path->nodes[i]);
2927 * This function isn't completely safe. It's the caller's responsibility
2928 * that the new key won't break the order
2930 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2931 struct btrfs_root *root, struct btrfs_path *path,
2932 struct btrfs_key *new_key)
2934 struct btrfs_disk_key disk_key;
2935 struct extent_buffer *eb;
2938 eb = path->nodes[0];
2939 slot = path->slots[0];
2941 btrfs_item_key(eb, &disk_key, slot - 1);
2942 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2944 if (slot < btrfs_header_nritems(eb) - 1) {
2945 btrfs_item_key(eb, &disk_key, slot + 1);
2946 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2949 btrfs_cpu_key_to_disk(&disk_key, new_key);
2950 btrfs_set_item_key(eb, &disk_key, slot);
2951 btrfs_mark_buffer_dirty(eb);
2953 fixup_low_keys(trans, root, path, &disk_key, 1);
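
/*
 * Illustrative sketch for btrfs_set_item_key_safe() above: bump the offset of
 * the item the path points at. The helper name and delta are hypothetical;
 * the caller owns the ordering guarantee that the BUG_ONs check.
 */
static void example_bump_item_offset(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, u64 delta)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	key.offset += delta;

	/* only valid if the bumped key still sorts below the next item */
	btrfs_set_item_key_safe(trans, root, path, &key);
}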
2957 * try to push data from one node into the next node left in the
2960 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2961 * error, and > 0 if there was no room in the left hand block.
2963 static int push_node_left(struct btrfs_trans_handle *trans,
2964 struct btrfs_root *root, struct extent_buffer *dst,
2965 struct extent_buffer *src, int empty)
2972 src_nritems = btrfs_header_nritems(src);
2973 dst_nritems = btrfs_header_nritems(dst);
2974 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2975 WARN_ON(btrfs_header_generation(src) != trans->transid);
2976 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2978 if (!empty && src_nritems <= 8)
2981 if (push_items <= 0)
2985 push_items = min(src_nritems, push_items);
2986 if (push_items < src_nritems) {
2987 /* leave at least 8 pointers in the node if
2988 * we aren't going to empty it
2990 if (src_nritems - push_items < 8) {
2991 if (push_items <= 8)
2997 push_items = min(src_nritems - 8, push_items);
2999 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3001 copy_extent_buffer(dst, src,
3002 btrfs_node_key_ptr_offset(dst_nritems),
3003 btrfs_node_key_ptr_offset(0),
3004 push_items * sizeof(struct btrfs_key_ptr));
3006 if (push_items < src_nritems) {
3008 * don't call tree_mod_log_eb_move here, key removal was already
3009 * fully logged by tree_mod_log_eb_copy above.
3011 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3012 btrfs_node_key_ptr_offset(push_items),
3013 (src_nritems - push_items) *
3014 sizeof(struct btrfs_key_ptr));
3016 btrfs_set_header_nritems(src, src_nritems - push_items);
3017 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3018 btrfs_mark_buffer_dirty(src);
3019 btrfs_mark_buffer_dirty(dst);
3025 * try to push data from one node into the next node right in the
3028 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3029 * error, and > 0 if there was no room in the right hand block.
3031 * this will only push up to 1/2 the contents of the left node over
3033 static int balance_node_right(struct btrfs_trans_handle *trans,
3034 struct btrfs_root *root,
3035 struct extent_buffer *dst,
3036 struct extent_buffer *src)
3044 WARN_ON(btrfs_header_generation(src) != trans->transid);
3045 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3047 src_nritems = btrfs_header_nritems(src);
3048 dst_nritems = btrfs_header_nritems(dst);
3049 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3050 if (push_items <= 0)
3053 if (src_nritems < 4)
3056 max_push = src_nritems / 2 + 1;
3057 /* don't try to empty the node */
3058 if (max_push >= src_nritems)
3061 if (max_push < push_items)
3062 push_items = max_push;
3064 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3065 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3066 btrfs_node_key_ptr_offset(0),
3068 sizeof(struct btrfs_key_ptr));
3070 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3071 src_nritems - push_items, push_items);
3072 copy_extent_buffer(dst, src,
3073 btrfs_node_key_ptr_offset(0),
3074 btrfs_node_key_ptr_offset(src_nritems - push_items),
3075 push_items * sizeof(struct btrfs_key_ptr));
3077 btrfs_set_header_nritems(src, src_nritems - push_items);
3078 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3080 btrfs_mark_buffer_dirty(src);
3081 btrfs_mark_buffer_dirty(dst);
3087 * helper function to insert a new root level in the tree.
3088 * A new node is allocated, and a single item is inserted to
3089 * point to the existing root
3091 * returns zero on success or < 0 on failure.
3093 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3094 struct btrfs_root *root,
3095 struct btrfs_path *path, int level)
3098 struct extent_buffer *lower;
3099 struct extent_buffer *c;
3100 struct extent_buffer *old;
3101 struct btrfs_disk_key lower_key;
3103 BUG_ON(path->nodes[level]);
3104 BUG_ON(path->nodes[level-1] != root->node);
3106 lower = path->nodes[level-1];
3108 btrfs_item_key(lower, &lower_key, 0);
3110 btrfs_node_key(lower, &lower_key, 0);
3112 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3113 root->root_key.objectid, &lower_key,
3114 level, root->node->start, 0);
3118 root_add_used(root, root->nodesize);
3120 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3121 btrfs_set_header_nritems(c, 1);
3122 btrfs_set_header_level(c, level);
3123 btrfs_set_header_bytenr(c, c->start);
3124 btrfs_set_header_generation(c, trans->transid);
3125 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3126 btrfs_set_header_owner(c, root->root_key.objectid);
3128 write_extent_buffer(c, root->fs_info->fsid,
3129 (unsigned long)btrfs_header_fsid(c),
3132 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3133 (unsigned long)btrfs_header_chunk_tree_uuid(c),
3136 btrfs_set_node_key(c, &lower_key, 0);
3137 btrfs_set_node_blockptr(c, 0, lower->start);
3138 lower_gen = btrfs_header_generation(lower);
3139 WARN_ON(lower_gen != trans->transid);
3141 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3143 btrfs_mark_buffer_dirty(c);
3146 tree_mod_log_set_root_pointer(root, c);
3147 rcu_assign_pointer(root->node, c);
3149 /* the super has an extra ref to root->node */
3150 free_extent_buffer(old);
3152 add_root_to_dirty_list(root);
3153 extent_buffer_get(c);
3154 path->nodes[level] = c;
3155 path->locks[level] = BTRFS_WRITE_LOCK;
3156 path->slots[level] = 0;
3161 * worker function to insert a single pointer in a node.
3162 * the node should have enough room for the pointer already
3164 * slot and level indicate where you want the key to go, and
3165 * bytenr is the block the key points to.
3167 static void insert_ptr(struct btrfs_trans_handle *trans,
3168 struct btrfs_root *root, struct btrfs_path *path,
3169 struct btrfs_disk_key *key, u64 bytenr,
3170 int slot, int level)
3172 struct extent_buffer *lower;
3176 BUG_ON(!path->nodes[level]);
3177 btrfs_assert_tree_locked(path->nodes[level]);
3178 lower = path->nodes[level];
3179 nritems = btrfs_header_nritems(lower);
3180 BUG_ON(slot > nritems);
3181 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3182 if (slot != nritems) {
3184 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3185 slot, nritems - slot);
3186 memmove_extent_buffer(lower,
3187 btrfs_node_key_ptr_offset(slot + 1),
3188 btrfs_node_key_ptr_offset(slot),
3189 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3192 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3196 btrfs_set_node_key(lower, key, slot);
3197 btrfs_set_node_blockptr(lower, slot, bytenr);
3198 WARN_ON(trans->transid == 0);
3199 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3200 btrfs_set_header_nritems(lower, nritems + 1);
3201 btrfs_mark_buffer_dirty(lower);
3205 * split the node at the specified level in path in two.
3206 * The path is corrected to point to the appropriate node after the split
3208 * Before splitting this tries to make some room in the node by pushing
3209 * left and right, if either one works, it returns right away.
3211 * returns 0 on success and < 0 on failure
3213 static noinline int split_node(struct btrfs_trans_handle *trans,
3214 struct btrfs_root *root,
3215 struct btrfs_path *path, int level)
3217 struct extent_buffer *c;
3218 struct extent_buffer *split;
3219 struct btrfs_disk_key disk_key;
3224 c = path->nodes[level];
3225 WARN_ON(btrfs_header_generation(c) != trans->transid);
3226 if (c == root->node) {
3227 /* trying to split the root, let's make a new one */
3228 ret = insert_new_root(trans, root, path, level + 1);
3232 ret = push_nodes_for_insert(trans, root, path, level);
3233 c = path->nodes[level];
3234 if (!ret && btrfs_header_nritems(c) <
3235 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3241 c_nritems = btrfs_header_nritems(c);
3242 mid = (c_nritems + 1) / 2;
3243 btrfs_node_key(c, &disk_key, mid);
3245 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3246 root->root_key.objectid,
3247 &disk_key, level, c->start, 0);
3249 return PTR_ERR(split);
3251 root_add_used(root, root->nodesize);
3253 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3254 btrfs_set_header_level(split, btrfs_header_level(c));
3255 btrfs_set_header_bytenr(split, split->start);
3256 btrfs_set_header_generation(split, trans->transid);
3257 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3258 btrfs_set_header_owner(split, root->root_key.objectid);
3259 write_extent_buffer(split, root->fs_info->fsid,
3260 (unsigned long)btrfs_header_fsid(split),
3262 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3263 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3266 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3267 copy_extent_buffer(split, c,
3268 btrfs_node_key_ptr_offset(0),
3269 btrfs_node_key_ptr_offset(mid),
3270 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3271 btrfs_set_header_nritems(split, c_nritems - mid);
3272 btrfs_set_header_nritems(c, mid);
3275 btrfs_mark_buffer_dirty(c);
3276 btrfs_mark_buffer_dirty(split);
3278 insert_ptr(trans, root, path, &disk_key, split->start,
3279 path->slots[level + 1] + 1, level + 1);
3281 if (path->slots[level] >= mid) {
3282 path->slots[level] -= mid;
3283 btrfs_tree_unlock(c);
3284 free_extent_buffer(c);
3285 path->nodes[level] = split;
3286 path->slots[level + 1] += 1;
3288 btrfs_tree_unlock(split);
3289 free_extent_buffer(split);
3295 * how many bytes are required to store the items in a leaf. start
3296 * and nr indicate which items in the leaf to check. This totals up the
3297 * space used both by the item structs and the item data
3299 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3302 int nritems = btrfs_header_nritems(l);
3303 int end = min(nritems, start + nr) - 1;
3307 data_len = btrfs_item_end_nr(l, start);
3308 data_len = data_len - btrfs_item_offset_nr(l, end);
3309 data_len += sizeof(struct btrfs_item) * nr;
3310 WARN_ON(data_len < 0);
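
/*
 * Worked example for leaf_space_used() above (figures assume the packed
 * 25 byte on-disk struct btrfs_item): checking two adjacent items that each
 * hold 100 bytes of data costs
 *
 *	2 * sizeof(struct btrfs_item) + 200 = 2 * 25 + 200 = 250 bytes
 *
 * of leaf space, item headers plus item data.
 */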
3315 * The space between the end of the leaf items and
3316 * the start of the leaf data. IOW, how much room
3317 * the leaf has left for both items and data
3319 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3320 struct extent_buffer *leaf)
3322 int nritems = btrfs_header_nritems(leaf);
3324 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3326 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3327 "used %d nritems %d\n",
3328 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3329 leaf_space_used(leaf, 0, nritems), nritems);
3335 * min slot controls the lowest index we're willing to push to the
3336 * right. We'll push up to and including min_slot, but no lower
3338 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3339 struct btrfs_root *root,
3340 struct btrfs_path *path,
3341 int data_size, int empty,
3342 struct extent_buffer *right,
3343 int free_space, u32 left_nritems,
3346 struct extent_buffer *left = path->nodes[0];
3347 struct extent_buffer *upper = path->nodes[1];
3348 struct btrfs_map_token token;
3349 struct btrfs_disk_key disk_key;
3354 struct btrfs_item *item;
3360 btrfs_init_map_token(&token);
3365 nr = max_t(u32, 1, min_slot);
3367 if (path->slots[0] >= left_nritems)
3368 push_space += data_size;
3370 slot = path->slots[1];
3371 i = left_nritems - 1;
3373 item = btrfs_item_nr(left, i);
3375 if (!empty && push_items > 0) {
3376 if (path->slots[0] > i)
3378 if (path->slots[0] == i) {
3379 int space = btrfs_leaf_free_space(root, left);
3380 if (space + push_space * 2 > free_space)
3385 if (path->slots[0] == i)
3386 push_space += data_size;
3388 this_item_size = btrfs_item_size(left, item);
3389 if (this_item_size + sizeof(*item) + push_space > free_space)
3393 push_space += this_item_size + sizeof(*item);
3399 if (push_items == 0)
3402 WARN_ON(!empty && push_items == left_nritems);
3404 /* push left to right */
3405 right_nritems = btrfs_header_nritems(right);
3407 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3408 push_space -= leaf_data_end(root, left);
3410 /* make room in the right data area */
3411 data_end = leaf_data_end(root, right);
3412 memmove_extent_buffer(right,
3413 btrfs_leaf_data(right) + data_end - push_space,
3414 btrfs_leaf_data(right) + data_end,
3415 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3417 /* copy from the left data area */
3418 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3419 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3420 btrfs_leaf_data(left) + leaf_data_end(root, left),
3423 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3424 btrfs_item_nr_offset(0),
3425 right_nritems * sizeof(struct btrfs_item));
3427 /* copy the items from left to right */
3428 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3429 btrfs_item_nr_offset(left_nritems - push_items),
3430 push_items * sizeof(struct btrfs_item));
3432 /* update the item pointers */
3433 right_nritems += push_items;
3434 btrfs_set_header_nritems(right, right_nritems);
3435 push_space = BTRFS_LEAF_DATA_SIZE(root);
3436 for (i = 0; i < right_nritems; i++) {
3437 item = btrfs_item_nr(right, i);
3438 push_space -= btrfs_token_item_size(right, item, &token);
3439 btrfs_set_token_item_offset(right, item, push_space, &token);
3442 left_nritems -= push_items;
3443 btrfs_set_header_nritems(left, left_nritems);
3446 btrfs_mark_buffer_dirty(left);
3448 clean_tree_block(trans, root, left);
3450 btrfs_mark_buffer_dirty(right);
3452 btrfs_item_key(right, &disk_key, 0);
3453 btrfs_set_node_key(upper, &disk_key, slot + 1);
3454 btrfs_mark_buffer_dirty(upper);
3456 /* then fixup the leaf pointer in the path */
3457 if (path->slots[0] >= left_nritems) {
3458 path->slots[0] -= left_nritems;
3459 if (btrfs_header_nritems(path->nodes[0]) == 0)
3460 clean_tree_block(trans, root, path->nodes[0]);
3461 btrfs_tree_unlock(path->nodes[0]);
3462 free_extent_buffer(path->nodes[0]);
3463 path->nodes[0] = right;
3464 path->slots[1] += 1;
3466 btrfs_tree_unlock(right);
3467 free_extent_buffer(right);
3472 btrfs_tree_unlock(right);
3473 free_extent_buffer(right);
3478 * push some data in the path leaf to the right, trying to free up at
3479 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3481 * returns 1 if the push failed because the other node didn't have enough
3482 * room, 0 if everything worked out and < 0 if there were major errors.
3484 * this will push starting from min_slot to the end of the leaf. It won't
3485 * push any slot lower than min_slot
3487 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3488 *root, struct btrfs_path *path,
3489 int min_data_size, int data_size,
3490 int empty, u32 min_slot)
3492 struct extent_buffer *left = path->nodes[0];
3493 struct extent_buffer *right;
3494 struct extent_buffer *upper;
3500 if (!path->nodes[1])
3503 slot = path->slots[1];
3504 upper = path->nodes[1];
3505 if (slot >= btrfs_header_nritems(upper) - 1)
3508 btrfs_assert_tree_locked(path->nodes[1]);
3510 right = read_node_slot(root, upper, slot + 1);
3514 btrfs_tree_lock(right);
3515 btrfs_set_lock_blocking(right);
3517 free_space = btrfs_leaf_free_space(root, right);
3518 if (free_space < data_size)
3521 /* cow and double check */
3522 ret = btrfs_cow_block(trans, root, right, upper,
3527 free_space = btrfs_leaf_free_space(root, right);
3528 if (free_space < data_size)
3531 left_nritems = btrfs_header_nritems(left);
3532 if (left_nritems == 0)
3535 return __push_leaf_right(trans, root, path, min_data_size, empty,
3536 right, free_space, left_nritems, min_slot);
3538 btrfs_tree_unlock(right);
3539 free_extent_buffer(right);
3544 * push some data in the path leaf to the left, trying to free up at
3545 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3547 * max_slot can put a limit on how far into the leaf we'll push items. The
3548 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3551 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3552 struct btrfs_root *root,
3553 struct btrfs_path *path, int data_size,
3554 int empty, struct extent_buffer *left,
3555 int free_space, u32 right_nritems,
3558 struct btrfs_disk_key disk_key;
3559 struct extent_buffer *right = path->nodes[0];
3563 struct btrfs_item *item;
3564 u32 old_left_nritems;
3568 u32 old_left_item_size;
3569 struct btrfs_map_token token;
3571 btrfs_init_map_token(&token);
3574 nr = min(right_nritems, max_slot);
3576 nr = min(right_nritems - 1, max_slot);
3578 for (i = 0; i < nr; i++) {
3579 item = btrfs_item_nr(right, i);
3581 if (!empty && push_items > 0) {
3582 if (path->slots[0] < i)
3584 if (path->slots[0] == i) {
3585 int space = btrfs_leaf_free_space(root, right);
3586 if (space + push_space * 2 > free_space)
3591 if (path->slots[0] == i)
3592 push_space += data_size;
3594 this_item_size = btrfs_item_size(right, item);
3595 if (this_item_size + sizeof(*item) + push_space > free_space)
3599 push_space += this_item_size + sizeof(*item);
3602 if (push_items == 0) {
3606 if (!empty && push_items == btrfs_header_nritems(right))
3609 /* push data from right to left */
3610 copy_extent_buffer(left, right,
3611 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3612 btrfs_item_nr_offset(0),
3613 push_items * sizeof(struct btrfs_item));
3615 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3616 btrfs_item_offset_nr(right, push_items - 1);
3618 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3619 leaf_data_end(root, left) - push_space,
3620 btrfs_leaf_data(right) +
3621 btrfs_item_offset_nr(right, push_items - 1),
3623 old_left_nritems = btrfs_header_nritems(left);
3624 BUG_ON(old_left_nritems <= 0);
3626 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3627 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3630 item = btrfs_item_nr(left, i);
3632 ioff = btrfs_token_item_offset(left, item, &token);
3633 btrfs_set_token_item_offset(left, item,
3634 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3637 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3639 /* fixup right node */
3640 if (push_items > right_nritems)
3641 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3644 if (push_items < right_nritems) {
3645 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3646 leaf_data_end(root, right);
3647 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3648 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3649 btrfs_leaf_data(right) +
3650 leaf_data_end(root, right), push_space);
3652 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3653 btrfs_item_nr_offset(push_items),
3654 (btrfs_header_nritems(right) - push_items) *
3655 sizeof(struct btrfs_item));
3657 right_nritems -= push_items;
3658 btrfs_set_header_nritems(right, right_nritems);
3659 push_space = BTRFS_LEAF_DATA_SIZE(root);
3660 for (i = 0; i < right_nritems; i++) {
3661 item = btrfs_item_nr(right, i);
3663 push_space = push_space - btrfs_token_item_size(right,
3665 btrfs_set_token_item_offset(right, item, push_space, &token);
3668 btrfs_mark_buffer_dirty(left);
3670 btrfs_mark_buffer_dirty(right);
3672 clean_tree_block(trans, root, right);
3674 btrfs_item_key(right, &disk_key, 0);
3675 fixup_low_keys(trans, root, path, &disk_key, 1);
3677 /* then fixup the leaf pointer in the path */
3678 if (path->slots[0] < push_items) {
3679 path->slots[0] += old_left_nritems;
3680 btrfs_tree_unlock(path->nodes[0]);
3681 free_extent_buffer(path->nodes[0]);
3682 path->nodes[0] = left;
3683 path->slots[1] -= 1;
3685 btrfs_tree_unlock(left);
3686 free_extent_buffer(left);
3687 path->slots[0] -= push_items;
3689 BUG_ON(path->slots[0] < 0);
3692 btrfs_tree_unlock(left);
3693 free_extent_buffer(left);
3698 * push some data in the path leaf to the left, trying to free up at
3699 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3701 * max_slot can put a limit on how far into the leaf we'll push items. The
3702 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3705 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3706 *root, struct btrfs_path *path, int min_data_size,
3707 int data_size, int empty, u32 max_slot)
3709 struct extent_buffer *right = path->nodes[0];
3710 struct extent_buffer *left;
3716 slot = path->slots[1];
3719 if (!path->nodes[1])
3722 right_nritems = btrfs_header_nritems(right);
3723 if (right_nritems == 0)
3726 btrfs_assert_tree_locked(path->nodes[1]);
3728 left = read_node_slot(root, path->nodes[1], slot - 1);
3732 btrfs_tree_lock(left);
3733 btrfs_set_lock_blocking(left);
3735 free_space = btrfs_leaf_free_space(root, left);
3736 if (free_space < data_size) {
3741 /* cow and double check */
3742 ret = btrfs_cow_block(trans, root, left,
3743 path->nodes[1], slot - 1, &left);
3745 /* we hit -ENOSPC, but it isn't fatal here */
3751 free_space = btrfs_leaf_free_space(root, left);
3752 if (free_space < data_size) {
3757 return __push_leaf_left(trans, root, path, min_data_size,
3758 empty, left, free_space, right_nritems,
3761 btrfs_tree_unlock(left);
3762 free_extent_buffer(left);
3767 * split the path's leaf in two, making sure there is at least data_size
3768 * available for the resulting leaf level of the path.
3770 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3771 struct btrfs_root *root,
3772 struct btrfs_path *path,
3773 struct extent_buffer *l,
3774 struct extent_buffer *right,
3775 int slot, int mid, int nritems)
3780 struct btrfs_disk_key disk_key;
3781 struct btrfs_map_token token;
3783 btrfs_init_map_token(&token);
3785 nritems = nritems - mid;
3786 btrfs_set_header_nritems(right, nritems);
3787 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3789 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3790 btrfs_item_nr_offset(mid),
3791 nritems * sizeof(struct btrfs_item));
3793 copy_extent_buffer(right, l,
3794 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3795 data_copy_size, btrfs_leaf_data(l) +
3796 leaf_data_end(root, l), data_copy_size);
3798 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3799 btrfs_item_end_nr(l, mid);
3801 for (i = 0; i < nritems; i++) {
3802 struct btrfs_item *item = btrfs_item_nr(right, i);
3805 ioff = btrfs_token_item_offset(right, item, &token);
3806 btrfs_set_token_item_offset(right, item,
3807 ioff + rt_data_off, &token);
3810 btrfs_set_header_nritems(l, mid);
3811 btrfs_item_key(right, &disk_key, 0);
3812 insert_ptr(trans, root, path, &disk_key, right->start,
3813 path->slots[1] + 1, 1);
3815 btrfs_mark_buffer_dirty(right);
3816 btrfs_mark_buffer_dirty(l);
3817 BUG_ON(path->slots[0] != slot);
3820 btrfs_tree_unlock(path->nodes[0]);
3821 free_extent_buffer(path->nodes[0]);
3822 path->nodes[0] = right;
3823 path->slots[0] -= mid;
3824 path->slots[1] += 1;
3826 btrfs_tree_unlock(right);
3827 free_extent_buffer(right);
3830 BUG_ON(path->slots[0] < 0);
3834 * double splits happen when we need to insert a big item in the middle
3835 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3836 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3839 * We avoid this by trying to push the items on either side of our target
3840 * into the adjacent leaves. If all goes well we can avoid the double split
3843 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3844 struct btrfs_root *root,
3845 struct btrfs_path *path,
3853 slot = path->slots[0];
3856 * try to push all the items after our slot into the
3859 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3866 nritems = btrfs_header_nritems(path->nodes[0]);
3868 * our goal is to get our slot at the start or end of a leaf. If
3869 * we've done so we're done
3871 if (path->slots[0] == 0 || path->slots[0] == nritems)
3874 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3877 /* try to push all the items before our slot into the previous leaf */
3878 slot = path->slots[0];
3879 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3892 * split the path's leaf in two, making sure there is at least data_size
3893 * available for the resulting leaf level of the path.
3895 * returns 0 if all went well and < 0 on failure.
3897 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3898 struct btrfs_root *root,
3899 struct btrfs_key *ins_key,
3900 struct btrfs_path *path, int data_size,
3903 struct btrfs_disk_key disk_key;
3904 struct extent_buffer *l;
3908 struct extent_buffer *right;
3912 int num_doubles = 0;
3913 int tried_avoid_double = 0;
3916 slot = path->slots[0];
3917 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3918 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3921 /* first try to make some room by pushing left and right */
3923 wret = push_leaf_right(trans, root, path, data_size,
3928 wret = push_leaf_left(trans, root, path, data_size,
3929 data_size, 0, (u32)-1);
3935 /* did the pushes work? */
3936 if (btrfs_leaf_free_space(root, l) >= data_size)
3940 if (!path->nodes[1]) {
3941 ret = insert_new_root(trans, root, path, 1);
3948 slot = path->slots[0];
3949 nritems = btrfs_header_nritems(l);
3950 mid = (nritems + 1) / 2;
3954 leaf_space_used(l, mid, nritems - mid) + data_size >
3955 BTRFS_LEAF_DATA_SIZE(root)) {
3956 if (slot >= nritems) {
3960 if (mid != nritems &&
3961 leaf_space_used(l, mid, nritems - mid) +
3962 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3963 if (data_size && !tried_avoid_double)
3964 goto push_for_double;
3970 if (leaf_space_used(l, 0, mid) + data_size >
3971 BTRFS_LEAF_DATA_SIZE(root)) {
3972 if (!extend && data_size && slot == 0) {
3974 } else if ((extend || !data_size) && slot == 0) {
3978 if (mid != nritems &&
3979 leaf_space_used(l, mid, nritems - mid) +
3980 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3981 if (data_size && !tried_avoid_double)
3982 goto push_for_double;
3990 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3992 btrfs_item_key(l, &disk_key, mid);
3994 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
3995 root->root_key.objectid,
3996 &disk_key, 0, l->start, 0);
3998 return PTR_ERR(right);
4000 root_add_used(root, root->leafsize);
4002 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4003 btrfs_set_header_bytenr(right, right->start);
4004 btrfs_set_header_generation(right, trans->transid);
4005 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4006 btrfs_set_header_owner(right, root->root_key.objectid);
4007 btrfs_set_header_level(right, 0);
4008 write_extent_buffer(right, root->fs_info->fsid,
4009 (unsigned long)btrfs_header_fsid(right),
4012 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4013 (unsigned long)btrfs_header_chunk_tree_uuid(right),
4018 btrfs_set_header_nritems(right, 0);
4019 insert_ptr(trans, root, path, &disk_key, right->start,
4020 path->slots[1] + 1, 1);
4021 btrfs_tree_unlock(path->nodes[0]);
4022 free_extent_buffer(path->nodes[0]);
4023 path->nodes[0] = right;
4025 path->slots[1] += 1;
4027 btrfs_set_header_nritems(right, 0);
4028 insert_ptr(trans, root, path, &disk_key, right->start,
4030 btrfs_tree_unlock(path->nodes[0]);
4031 free_extent_buffer(path->nodes[0]);
4032 path->nodes[0] = right;
4034 if (path->slots[1] == 0)
4035 fixup_low_keys(trans, root, path,
4038 btrfs_mark_buffer_dirty(right);
4042 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4045 BUG_ON(num_doubles != 0);
4053 push_for_double_split(trans, root, path, data_size);
4054 tried_avoid_double = 1;
4055 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4060 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4061 struct btrfs_root *root,
4062 struct btrfs_path *path, int ins_len)
4064 struct btrfs_key key;
4065 struct extent_buffer *leaf;
4066 struct btrfs_file_extent_item *fi;
4071 leaf = path->nodes[0];
4072 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4074 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4075 key.type != BTRFS_EXTENT_CSUM_KEY);
4077 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4080 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4081 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4082 fi = btrfs_item_ptr(leaf, path->slots[0],
4083 struct btrfs_file_extent_item);
4084 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4086 btrfs_release_path(path);
4088 path->keep_locks = 1;
4089 path->search_for_split = 1;
4090 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4091 path->search_for_split = 0;
4096 leaf = path->nodes[0];
4097 /* if our item isn't there or got smaller, return now */
4098 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4101 /* the leaf has changed, it now has room. return now */
4102 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4105 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4106 fi = btrfs_item_ptr(leaf, path->slots[0],
4107 struct btrfs_file_extent_item);
4108 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4112 btrfs_set_path_blocking(path);
4113 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4117 path->keep_locks = 0;
4118 btrfs_unlock_up_safe(path, 1);
4121 path->keep_locks = 0;
4125 static noinline int split_item(struct btrfs_trans_handle *trans,
4126 struct btrfs_root *root,
4127 struct btrfs_path *path,
4128 struct btrfs_key *new_key,
4129 unsigned long split_offset)
4131 struct extent_buffer *leaf;
4132 struct btrfs_item *item;
4133 struct btrfs_item *new_item;
4139 struct btrfs_disk_key disk_key;
4141 leaf = path->nodes[0];
4142 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4144 btrfs_set_path_blocking(path);
4146 item = btrfs_item_nr(leaf, path->slots[0]);
4147 orig_offset = btrfs_item_offset(leaf, item);
4148 item_size = btrfs_item_size(leaf, item);
4150 buf = kmalloc(item_size, GFP_NOFS);
4154 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4155 path->slots[0]), item_size);
4157 slot = path->slots[0] + 1;
4158 nritems = btrfs_header_nritems(leaf);
4159 if (slot != nritems) {
4160 /* shift the items */
4161 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4162 btrfs_item_nr_offset(slot),
4163 (nritems - slot) * sizeof(struct btrfs_item));
4166 btrfs_cpu_key_to_disk(&disk_key, new_key);
4167 btrfs_set_item_key(leaf, &disk_key, slot);
4169 new_item = btrfs_item_nr(leaf, slot);
4171 btrfs_set_item_offset(leaf, new_item, orig_offset);
4172 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4174 btrfs_set_item_offset(leaf, item,
4175 orig_offset + item_size - split_offset);
4176 btrfs_set_item_size(leaf, item, split_offset);
4178 btrfs_set_header_nritems(leaf, nritems + 1);
4180 /* write the data for the start of the original item */
4181 write_extent_buffer(leaf, buf,
4182 btrfs_item_ptr_offset(leaf, path->slots[0]),
4185 /* write the data for the new item */
4186 write_extent_buffer(leaf, buf + split_offset,
4187 btrfs_item_ptr_offset(leaf, slot),
4188 item_size - split_offset);
4189 btrfs_mark_buffer_dirty(leaf);
4191 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4197 * This function splits a single item into two items,
4198 * giving 'new_key' to the new item and splitting the
4199 * old one at split_offset (from the start of the item).
4201 * The path may be released by this operation. After
4202 * the split, the path is pointing to the old item. The
4203 * new item is going to be in the same node as the old one.
4205 * Note, the item being split must be small enough to live alone on
4206 * a tree block with room for one extra struct btrfs_item
4208 * This allows us to split the item in place, keeping a lock on the
4209 * leaf the entire time.
4211 int btrfs_split_item(struct btrfs_trans_handle *trans,
4212 struct btrfs_root *root,
4213 struct btrfs_path *path,
4214 struct btrfs_key *new_key,
4215 unsigned long split_offset)
4218 ret = setup_leaf_for_split(trans, root, path,
4219 sizeof(struct btrfs_item));
4223 ret = split_item(trans, root, path, new_key, split_offset);
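
/*
 * Illustrative sketch for btrfs_split_item() above: split the current item so
 * its first 16 bytes keep the old key and the remainder moves under new_key.
 * The helper name and the 16 byte split point are hypothetical.
 */
static int example_split_current_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *new_key)
{
	/* split_offset is measured from the start of the item data */
	return btrfs_split_item(trans, root, path, new_key, 16);
}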
4228 * This function duplicates an item, giving 'new_key' to the new item.
4229 * It guarantees both items live in the same tree leaf and the new item
4230 * is contiguous with the original item.
4232 * This allows us to split a file extent in place, keeping a lock on the
4233 * leaf the entire time.
4235 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4236 struct btrfs_root *root,
4237 struct btrfs_path *path,
4238 struct btrfs_key *new_key)
4240 struct extent_buffer *leaf;
4244 leaf = path->nodes[0];
4245 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4246 ret = setup_leaf_for_split(trans, root, path,
4247 item_size + sizeof(struct btrfs_item));
4252 setup_items_for_insert(trans, root, path, new_key, &item_size,
4253 item_size, item_size +
4254 sizeof(struct btrfs_item), 1);
4255 leaf = path->nodes[0];
4256 memcpy_extent_buffer(leaf,
4257 btrfs_item_ptr_offset(leaf, path->slots[0]),
4258 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4264 * make the item pointed to by the path smaller. new_size indicates
4265 * how small to make it, and from_end tells us if we just chop bytes
4266 * off the end of the item or if we shift the item to chop bytes off the front.
4269 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4270 struct btrfs_root *root,
4271 struct btrfs_path *path,
4272 u32 new_size, int from_end)
4275 struct extent_buffer *leaf;
4276 struct btrfs_item *item;
4278 unsigned int data_end;
4279 unsigned int old_data_start;
4280 unsigned int old_size;
4281 unsigned int size_diff;
4283 struct btrfs_map_token token;
4285 btrfs_init_map_token(&token);
4287 leaf = path->nodes[0];
4288 slot = path->slots[0];
4290 old_size = btrfs_item_size_nr(leaf, slot);
4291 if (old_size == new_size)
4294 nritems = btrfs_header_nritems(leaf);
4295 data_end = leaf_data_end(root, leaf);
4297 old_data_start = btrfs_item_offset_nr(leaf, slot);
4299 size_diff = old_size - new_size;
4302 BUG_ON(slot >= nritems);
4305 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4307 /* first correct the data pointers */
4308 for (i = slot; i < nritems; i++) {
4310 item = btrfs_item_nr(leaf, i);
4312 ioff = btrfs_token_item_offset(leaf, item, &token);
4313 btrfs_set_token_item_offset(leaf, item,
4314 ioff + size_diff, &token);
4317 /* shift the data */
4319 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4320 data_end + size_diff, btrfs_leaf_data(leaf) +
4321 data_end, old_data_start + new_size - data_end);
4323 struct btrfs_disk_key disk_key;
4326 btrfs_item_key(leaf, &disk_key, slot);
4328 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4330 struct btrfs_file_extent_item *fi;
4332 fi = btrfs_item_ptr(leaf, slot,
4333 struct btrfs_file_extent_item);
4334 fi = (struct btrfs_file_extent_item *)(
4335 (unsigned long)fi - size_diff);
4337 if (btrfs_file_extent_type(leaf, fi) ==
4338 BTRFS_FILE_EXTENT_INLINE) {
4339 ptr = btrfs_item_ptr_offset(leaf, slot);
4340 memmove_extent_buffer(leaf, ptr,
4342 offsetof(struct btrfs_file_extent_item,
4347 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4348 data_end + size_diff, btrfs_leaf_data(leaf) +
4349 data_end, old_data_start - data_end);
4351 offset = btrfs_disk_key_offset(&disk_key);
4352 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4353 btrfs_set_item_key(leaf, &disk_key, slot);
4355 fixup_low_keys(trans, root, path, &disk_key, 1);
4358 item = btrfs_item_nr(leaf, slot);
4359 btrfs_set_item_size(leaf, item, new_size);
4360 btrfs_mark_buffer_dirty(leaf);
4362 if (btrfs_leaf_free_space(root, leaf) < 0) {
4363 btrfs_print_leaf(root, leaf);
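
/*
 * Illustrative sketch for btrfs_truncate_item() above: shrink the item at
 * path->slots[0] by dropping bytes from its end. The helper name is
 * hypothetical; new_size is the size the item keeps, and from_end = 1 chops
 * the tail as described in the comment above.
 */
static void example_shrink_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path, u32 new_size)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	if (new_size < old_size)
		btrfs_truncate_item(trans, root, path, new_size, 1);
}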
4369 * make the item pointed to by the path bigger, data_size is the number of bytes to add.
4371 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4372 struct btrfs_root *root, struct btrfs_path *path,
4376 struct extent_buffer *leaf;
4377 struct btrfs_item *item;
4379 unsigned int data_end;
4380 unsigned int old_data;
4381 unsigned int old_size;
4383 struct btrfs_map_token token;
4385 btrfs_init_map_token(&token);
4387 leaf = path->nodes[0];
4389 nritems = btrfs_header_nritems(leaf);
4390 data_end = leaf_data_end(root, leaf);
4392 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4393 btrfs_print_leaf(root, leaf);
4396 slot = path->slots[0];
4397 old_data = btrfs_item_end_nr(leaf, slot);
4400 if (slot >= nritems) {
4401 btrfs_print_leaf(root, leaf);
4402 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4408 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4410 /* first correct the data pointers */
4411 for (i = slot; i < nritems; i++) {
4413 item = btrfs_item_nr(leaf, i);
4415 ioff = btrfs_token_item_offset(leaf, item, &token);
4416 btrfs_set_token_item_offset(leaf, item,
4417 ioff - data_size, &token);
4420 /* shift the data */
4421 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4422 data_end - data_size, btrfs_leaf_data(leaf) +
4423 data_end, old_data - data_end);
4425 data_end = old_data;
4426 old_size = btrfs_item_size_nr(leaf, slot);
4427 item = btrfs_item_nr(leaf, slot);
4428 btrfs_set_item_size(leaf, item, old_size + data_size);
4429 btrfs_mark_buffer_dirty(leaf);
4431 if (btrfs_leaf_free_space(root, leaf) < 0) {
4432 btrfs_print_leaf(root, leaf);
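
/*
 * Illustrative sketch for btrfs_extend_item() above: grow the item at
 * path->slots[0] by data_size bytes and fill the new tail. The helper name is
 * hypothetical; since data_size is the number of bytes added, the fresh bytes
 * land after the old item data.
 */
static void example_append_to_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   const void *data, u32 data_size)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(trans, root, path, data_size);

	/* the item's data offset moved, so recompute it before writing */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr + old_size, data_size);
	btrfs_mark_buffer_dirty(leaf);
}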
4438 * this is a helper for btrfs_insert_empty_items, the main goal here is
4439 * to save stack depth by doing the bulk of the work in a function
4440 * that doesn't call btrfs_search_slot
4442 void setup_items_for_insert(struct btrfs_trans_handle *trans,
4443 struct btrfs_root *root, struct btrfs_path *path,
4444 struct btrfs_key *cpu_key, u32 *data_size,
4445 u32 total_data, u32 total_size, int nr)
4447 struct btrfs_item *item;
4450 unsigned int data_end;
4451 struct btrfs_disk_key disk_key;
4452 struct extent_buffer *leaf;
4454 struct btrfs_map_token token;
4456 btrfs_init_map_token(&token);
4458 leaf = path->nodes[0];
4459 slot = path->slots[0];
4461 nritems = btrfs_header_nritems(leaf);
4462 data_end = leaf_data_end(root, leaf);
4464 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4465 btrfs_print_leaf(root, leaf);
4466 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4467 total_size, btrfs_leaf_free_space(root, leaf));
4471 if (slot != nritems) {
4472 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4474 if (old_data < data_end) {
4475 btrfs_print_leaf(root, leaf);
4476 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4477 slot, old_data, data_end);
4481 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4483 /* first correct the data pointers */
4484 for (i = slot; i < nritems; i++) {
4487 item = btrfs_item_nr(leaf, i);
4488 ioff = btrfs_token_item_offset(leaf, item, &token);
4489 btrfs_set_token_item_offset(leaf, item,
4490 ioff - total_data, &token);
4492 /* shift the items */
4493 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4494 btrfs_item_nr_offset(slot),
4495 (nritems - slot) * sizeof(struct btrfs_item));
4497 /* shift the data */
4498 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4499 data_end - total_data, btrfs_leaf_data(leaf) +
4500 data_end, old_data - data_end);
4501 data_end = old_data;
4504 /* setup the item for the new data */
4505 for (i = 0; i < nr; i++) {
4506 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4507 btrfs_set_item_key(leaf, &disk_key, slot + i);
4508 item = btrfs_item_nr(leaf, slot + i);
4509 btrfs_set_token_item_offset(leaf, item,
4510 data_end - data_size[i], &token);
4511 data_end -= data_size[i];
4512 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4515 btrfs_set_header_nritems(leaf, nritems + nr);
4518 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4519 fixup_low_keys(trans, root, path, &disk_key, 1);
4521 btrfs_unlock_up_safe(path, 1);
4522 btrfs_mark_buffer_dirty(leaf);
4524 if (btrfs_leaf_free_space(root, leaf) < 0) {
4525 btrfs_print_leaf(root, leaf);
4531 * Given a key and some data, insert items into the tree.
4532 * This does all the path init required, making room in the tree if needed.
4534 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4535 struct btrfs_root *root,
4536 struct btrfs_path *path,
4537 struct btrfs_key *cpu_key, u32 *data_size,
4546 for (i = 0; i < nr; i++)
4547 total_data += data_size[i];
4549 total_size = total_data + (nr * sizeof(struct btrfs_item));
4550 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4556 slot = path->slots[0];
4559 setup_items_for_insert(trans, root, path, cpu_key, data_size,
4560 total_data, total_size, nr);
4565 * Given a key and some data, insert an item into the tree.
4566 * This does all the path init required, making room in the tree if needed.
4568 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4569 *root, struct btrfs_key *cpu_key, void *data, u32
4573 struct btrfs_path *path;
4574 struct extent_buffer *leaf;
4577 path = btrfs_alloc_path();
4580 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4582 leaf = path->nodes[0];
4583 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4584 write_extent_buffer(leaf, data, ptr, data_size);
4585 btrfs_mark_buffer_dirty(leaf);
4587 btrfs_free_path(path);
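
/*
 * Illustrative sketch of the insert-then-fill pattern that
 * btrfs_insert_item() above wraps: reserve an empty item with
 * btrfs_insert_empty_item() and write into it through the leaf. The helper
 * name is hypothetical and the payload is simply zeroed here.
 */
static int example_insert_zeroed_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_key *key, u32 size)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_item(trans, root, path, key, size);
	if (ret == 0) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		/* zero the fresh item data before anything reads it back */
		memset_extent_buffer(leaf, 0, ptr, size);
		btrfs_mark_buffer_dirty(leaf);
	}

	btrfs_free_path(path);
	return ret;
}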
4592 * delete the pointer from a given node.
4594 * the tree should have been previously balanced so the deletion does not empty a node.
4597 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4598 struct btrfs_path *path, int level, int slot)
4600 struct extent_buffer *parent = path->nodes[level];
4605 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4606 MOD_LOG_KEY_REMOVE);
4610 nritems = btrfs_header_nritems(parent);
4611 if (slot != nritems - 1) {
4613 tree_mod_log_eb_move(root->fs_info, parent, slot,
4614 slot + 1, nritems - slot - 1);
4615 memmove_extent_buffer(parent,
4616 btrfs_node_key_ptr_offset(slot),
4617 btrfs_node_key_ptr_offset(slot + 1),
4618 sizeof(struct btrfs_key_ptr) *
4619 (nritems - slot - 1));
4623 btrfs_set_header_nritems(parent, nritems);
4624 if (nritems == 0 && parent == root->node) {
4625 BUG_ON(btrfs_header_level(root->node) != 1);
4626 /* just turn the root into a leaf and break */
4627 btrfs_set_header_level(root->node, 0);
4628 } else if (slot == 0) {
4629 struct btrfs_disk_key disk_key;
4631 btrfs_node_key(parent, &disk_key, 0);
4632 fixup_low_keys(trans, root, path, &disk_key, level + 1);
4634 btrfs_mark_buffer_dirty(parent);
4638 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4641 * This deletes the pointer in path->nodes[1] and frees the leaf
4642 * block extent. zero is returned if it all worked out, < 0 otherwise.
4644 * The path must have already been setup for deleting the leaf, including
4645 * all the proper balancing. path->nodes[1] must be locked.
4647 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4648 struct btrfs_root *root,
4649 struct btrfs_path *path,
4650 struct extent_buffer *leaf)
4652 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4653 del_ptr(trans, root, path, 1, path->slots[1]);
4656 * btrfs_free_extent is expensive, we want to make sure we
4657 * aren't holding any locks when we call it
4659 btrfs_unlock_up_safe(path, 0);
4661 root_sub_used(root, leaf->len);
4663 extent_buffer_get(leaf);
4664 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4665 free_extent_buffer_stale(leaf);
4668 * delete the item at the leaf level in path. If that empties
4669 * the leaf, remove it from the tree
4671 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4672 struct btrfs_path *path, int slot, int nr)
4674 struct extent_buffer *leaf;
4675 struct btrfs_item *item;
4682 struct btrfs_map_token token;
4684 btrfs_init_map_token(&token);
4686 leaf = path->nodes[0];
4687 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4689 for (i = 0; i < nr; i++)
4690 dsize += btrfs_item_size_nr(leaf, slot + i);
4692 nritems = btrfs_header_nritems(leaf);
4694 if (slot + nr != nritems) {
4695 int data_end = leaf_data_end(root, leaf);
4697 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4699 btrfs_leaf_data(leaf) + data_end,
4700 last_off - data_end);
4702 for (i = slot + nr; i < nritems; i++) {
4705 item = btrfs_item_nr(leaf, i);
4706 ioff = btrfs_token_item_offset(leaf, item, &token);
4707 btrfs_set_token_item_offset(leaf, item,
4708 ioff + dsize, &token);
4711 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4712 btrfs_item_nr_offset(slot + nr),
4713 sizeof(struct btrfs_item) *
4714 (nritems - slot - nr));
4716 btrfs_set_header_nritems(leaf, nritems - nr);
4719 /* delete the leaf if we've emptied it */
4721 if (leaf == root->node) {
4722 btrfs_set_header_level(leaf, 0);
4724 btrfs_set_path_blocking(path);
4725 clean_tree_block(trans, root, leaf);
4726 btrfs_del_leaf(trans, root, path, leaf);
4729 int used = leaf_space_used(leaf, 0, nritems);
4731 struct btrfs_disk_key disk_key;
4733 btrfs_item_key(leaf, &disk_key, 0);
4734 fixup_low_keys(trans, root, path, &disk_key, 1);
4737 /* delete the leaf if it is mostly empty */
4738 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4739 /* push_leaf_left fixes the path.
4740 * make sure the path still points to our leaf
4741 * for a possible call to del_ptr below
4743 slot = path->slots[1];
4744 extent_buffer_get(leaf);
4746 btrfs_set_path_blocking(path);
4747 wret = push_leaf_left(trans, root, path, 1, 1,
4749 if (wret < 0 && wret != -ENOSPC)
4752 if (path->nodes[0] == leaf &&
4753 btrfs_header_nritems(leaf)) {
4754 wret = push_leaf_right(trans, root, path, 1,
4756 if (wret < 0 && wret != -ENOSPC)
4760 if (btrfs_header_nritems(leaf) == 0) {
4761 path->slots[1] = slot;
4762 btrfs_del_leaf(trans, root, path, leaf);
4763 free_extent_buffer(leaf);
4766 /* if we're still in the path, make sure
4767 * we're dirty. Otherwise, one of the
4768 * push_leaf functions must have already
4769 * dirtied this buffer
4771 if (path->nodes[0] == leaf)
4772 btrfs_mark_buffer_dirty(leaf);
4773 free_extent_buffer(leaf);
4776 btrfs_mark_buffer_dirty(leaf);
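
/*
 * Illustrative sketch for btrfs_del_items() above: look an item up with a
 * COWing search and delete it through the single-item btrfs_del_item()
 * wrapper. The helper name and the -ENOENT mapping are this example's
 * choices, not something the API imposes.
 */
static int example_delete_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len = -1 asks for merging on the way down, cow = 1 to modify */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_item(trans, root, path);
	else if (ret == 1)
		ret = -ENOENT;

	btrfs_free_path(path);
	return ret;
}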
4782 /*
4783 * search the tree again to find a leaf with lesser keys
4784 * returns 0 if it found something or 1 if there are no lesser leaves.
4785 * returns < 0 on io errors.
4787 * This may release the path, and so you may lose any locks held at the
4788 * time of the call.
4789 */
4790 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4792 struct btrfs_key key;
4793 struct btrfs_disk_key found_key;
4796 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4798 if (key.offset > 0)
4799 key.offset--;
4800 else if (key.type > 0)
4801 key.type--;
4802 else if (key.objectid > 0)
4803 key.objectid--;
4804 else
4805 return 1;
4807 btrfs_release_path(path);
4808 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4811 btrfs_item_key(path->nodes[0], &found_key, 0);
4812 ret = comp_keys(&found_key, &key);
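/*
 * Illustrative sketch (not part of the original file): a backwards walk
 * built on btrfs_prev_leaf(), in the same spirit as btrfs_previous_item()
 * at the end of this file.  A return of 1 means there is no lesser leaf,
 * < 0 is an I/O error; process() is a placeholder for the caller's work:
 *
 *	while (1) {
 *		if (path->slots[0] == 0) {
 *			ret = btrfs_prev_leaf(root, path);
 *			if (ret)
 *				break;
 *			nritems = btrfs_header_nritems(path->nodes[0]);
 *			if (nritems == 0)
 *				break;
 *			if (path->slots[0] >= nritems)
 *				path->slots[0] = nritems - 1;
 *		} else {
 *			path->slots[0]--;
 *		}
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *		if (!process(&key))
 *			break;
 *	}
 */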
4818 /*
4819 * A helper function to walk down the tree starting at min_key, and looking
4820 * for nodes or leaves that are either in cache or have a minimum
4821 * transaction id. This is used by the btree defrag code, and tree logging
4823 * This does not cow, but it does stuff the starting key it finds back
4824 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4825 * key and get a writable path.
4827 * This does lock as it descends, and path->keep_locks should be set
4828 * to 1 by the caller.
4830 * This honors path->lowest_level to prevent descent past a given level
4831 * of the tree.
4833 * min_trans indicates the oldest transaction that you are interested
4834 * in walking through. Any nodes or leaves older than min_trans are
4835 * skipped over (without reading them).
4837 * returns zero if something useful was found, < 0 on error and 1 if there
4838 * was nothing in the tree that matched the search criteria.
4839 */
4840 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4841 struct btrfs_key *max_key,
4842 struct btrfs_path *path, int cache_only,
4843 u64 min_trans)
4844 {
4845 struct extent_buffer *cur;
4846 struct btrfs_key found_key;
4847 int slot;
4848 int sret;
4849 u32 nritems;
4850 int level;
4851 int ret = 1;
4853 WARN_ON(!path->keep_locks);
4855 cur = btrfs_read_lock_root_node(root);
4856 level = btrfs_header_level(cur);
4857 WARN_ON(path->nodes[level]);
4858 path->nodes[level] = cur;
4859 path->locks[level] = BTRFS_READ_LOCK;
4861 if (btrfs_header_generation(cur) < min_trans) {
4866 nritems = btrfs_header_nritems(cur);
4867 level = btrfs_header_level(cur);
4868 sret = bin_search(cur, min_key, level, &slot);
4870 /* at the lowest level, we're done, set up the path and exit */
4871 if (level == path->lowest_level) {
4872 if (slot >= nritems)
4875 path->slots[level] = slot;
4876 btrfs_item_key_to_cpu(cur, &found_key, slot);
4879 if (sret && slot > 0)
4880 slot--;
4881 /*
4882 * check this node pointer against the cache_only and
4883 * min_trans parameters. If it isn't in cache or is too
4884 * old, skip to the next one.
4885 */
4886 while (slot < nritems) {
4887 u64 blockptr;
4888 u64 gen;
4889 struct extent_buffer *tmp;
4890 struct btrfs_disk_key disk_key;
4892 blockptr = btrfs_node_blockptr(cur, slot);
4893 gen = btrfs_node_ptr_generation(cur, slot);
4894 if (gen < min_trans) {
4902 btrfs_node_key(cur, &disk_key, slot);
4903 if (comp_keys(&disk_key, max_key) >= 0) {
4909 tmp = btrfs_find_tree_block(root, blockptr,
4910 btrfs_level_size(root, level - 1));
4912 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
4913 free_extent_buffer(tmp);
4914 break;
4915 }
4916 if (tmp)
4917 free_extent_buffer(tmp);
4921 /*
4922 * we didn't find a candidate key in this node, walk forward
4923 * and find another one
4924 */
4925 if (slot >= nritems) {
4926 path->slots[level] = slot;
4927 btrfs_set_path_blocking(path);
4928 sret = btrfs_find_next_key(root, path, min_key, level,
4929 cache_only, min_trans);
4931 btrfs_release_path(path);
4937 /* save this node's key so it can be handed back to the caller in min_key */
4938 btrfs_node_key_to_cpu(cur, &found_key, slot);
4939 path->slots[level] = slot;
4940 if (level == path->lowest_level) {
4942 unlock_up(path, level, 1, 0, NULL);
4945 btrfs_set_path_blocking(path);
4946 cur = read_node_slot(root, cur, slot);
4947 BUG_ON(!cur); /* -ENOMEM */
4949 btrfs_tree_read_lock(cur);
4951 path->locks[level - 1] = BTRFS_READ_LOCK;
4952 path->nodes[level - 1] = cur;
4953 unlock_up(path, level, 1, 0, NULL);
4954 btrfs_clear_path_blocking(path, NULL, 0);
4956 out:
4957 if (ret == 0)
4958 memcpy(min_key, &found_key, sizeof(found_key));
4959 btrfs_set_path_blocking(path);
4960 return ret;
4961 }
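/*
 * Illustrative sketch (not part of the original file): a scan over
 * everything newer than min_trans, in the spirit of the defrag and
 * tree-log callers.  process() and the key advance are placeholders;
 * keep_locks must be set as required by the comment above:
 *
 *	min_key.objectid = 0;
 *	min_key.type = 0;
 *	min_key.offset = 0;
 *	max_key.objectid = (u64)-1;
 *	max_key.type = (u8)-1;
 *	max_key.offset = (u64)-1;
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, &max_key,
 *					   path, 0, min_trans);
 *		if (ret)
 *			break;
 *		process(path, &min_key);
 *		ret = btrfs_find_next_key(root, path, &min_key, 0,
 *					  0, min_trans);
 *		btrfs_release_path(path);
 *		if (ret)
 *			break;
 *	}
 */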
4963 static void tree_move_down(struct btrfs_root *root,
4964 struct btrfs_path *path,
4965 int *level, int root_level)
4967 BUG_ON(*level == 0);
4968 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4969 path->slots[*level]);
4970 path->slots[*level - 1] = 0;
4974 static int tree_move_next_or_upnext(struct btrfs_root *root,
4975 struct btrfs_path *path,
4976 int *level, int root_level)
4980 nritems = btrfs_header_nritems(path->nodes[*level]);
4982 path->slots[*level]++;
4984 while (path->slots[*level] >= nritems) {
4985 if (*level == root_level)
4986 return -1;
4988 /* move upnext */
4989 path->slots[*level] = 0;
4990 free_extent_buffer(path->nodes[*level]);
4991 path->nodes[*level] = NULL;
4992 (*level)++;
4993 path->slots[*level]++;
4995 nritems = btrfs_header_nritems(path->nodes[*level]);
5001 /*
5002 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5003 * or down, and -1 when the move failed.
5004 */
5005 static int tree_advance(struct btrfs_root *root,
5006 struct btrfs_path *path,
5007 int *level, int root_level,
5008 int allow_down,
5009 struct btrfs_key *key)
5010 {
5011 int ret;
5013 if (*level == 0 || !allow_down) {
5014 ret = tree_move_next_or_upnext(root, path, level, root_level);
5015 } else {
5016 tree_move_down(root, path, level, root_level);
5017 ret = 0;
5018 }
5019 if (ret >= 0) {
5020 if (*level == 0)
5021 btrfs_item_key_to_cpu(path->nodes[*level], key,
5022 path->slots[*level]);
5023 else
5024 btrfs_node_key_to_cpu(path->nodes[*level], key,
5025 path->slots[*level]);
5026 }
5027 return ret;
5028 }
5030 static int tree_compare_item(struct btrfs_root *left_root,
5031 struct btrfs_path *left_path,
5032 struct btrfs_path *right_path,
5033 char *tmp_buf)
5034 {
5035 int cmp;
5036 int len1, len2;
5037 unsigned long off1, off2;
5039 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5040 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5041 if (len1 != len2)
5042 return 1;
5044 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5045 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5046 right_path->slots[0]);
5048 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5050 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5056 #define ADVANCE 1
5057 #define ADVANCE_ONLY_NEXT -1
5059 /*
5060 * This function compares two trees and calls the provided callback for
5061 * every changed/new/deleted item it finds.
5062 * If shared tree blocks are encountered, whole subtrees are skipped, making
5063 * the compare pretty fast on snapshotted subvolumes.
5065 * This currently works on commit roots only. As commit roots are read only,
5066 * we don't do any locking. The commit roots are protected with transactions.
5067 * Transactions are ended and rejoined when a commit is tried in between.
5069 * This function checks for modifications done to the trees while comparing.
5070 * If it detects a change, it aborts immediately.
5071 */
5072 int btrfs_compare_trees(struct btrfs_root *left_root,
5073 struct btrfs_root *right_root,
5074 btrfs_changed_cb_t changed_cb, void *ctx)
5075 {
5076 int ret;
5077 int cmp;
5078 struct btrfs_trans_handle *trans = NULL;
5079 struct btrfs_path *left_path = NULL;
5080 struct btrfs_path *right_path = NULL;
5081 struct btrfs_key left_key;
5082 struct btrfs_key right_key;
5083 char *tmp_buf = NULL;
5084 int left_root_level;
5085 int right_root_level;
5086 int left_level;
5087 int right_level;
5088 int left_end_reached;
5089 int right_end_reached;
5090 int advance_left;
5091 int advance_right;
5092 u64 left_blockptr;
5093 u64 right_blockptr;
5094 u64 left_start_ctransid;
5095 u64 right_start_ctransid;
5096 u64 ctransid;
5098 left_path = btrfs_alloc_path();
5103 right_path = btrfs_alloc_path();
5109 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5115 left_path->search_commit_root = 1;
5116 left_path->skip_locking = 1;
5117 right_path->search_commit_root = 1;
5118 right_path->skip_locking = 1;
5120 spin_lock(&left_root->root_item_lock);
5121 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5122 spin_unlock(&left_root->root_item_lock);
5124 spin_lock(&right_root->root_item_lock);
5125 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5126 spin_unlock(&right_root->root_item_lock);
5128 trans = btrfs_join_transaction(left_root);
5129 if (IS_ERR(trans)) {
5130 ret = PTR_ERR(trans);
5135 /*
5136 * Strategy: Go to the first items of both trees. Then do
5137 *
5138 * If both trees are at level 0
5139 *   Compare keys of current items
5140 *     If left < right treat left item as new, advance left tree
5141 *       and repeat
5142 *     If left > right treat right item as deleted, advance right tree
5143 *       and repeat
5144 *     If left == right do deep compare of items, treat as changed if
5145 *       needed, advance both trees and repeat
5146 * If both trees are at the same level but not at level 0
5147 *   Compare keys of current nodes/leaves
5148 *     If left < right advance left tree and repeat
5149 *     If left > right advance right tree and repeat
5150 *     If left == right compare blockptrs of the next nodes/leaves
5151 *       If they match advance both trees but stay at the same level
5152 *         and repeat
5153 *       If they don't match advance both trees while allowing to go
5154 *         deeper and repeat
5155 * If tree levels are different
5156 *   Advance the tree that needs it and repeat
5157 *
5158 * Advancing a tree means:
5159 *   If we are at level 0, try to go to the next slot. If that's not
5160 *   possible, go one level up and repeat. Stop when we find a level
5161 *   where we could go to the next slot. We may at this point be on a
5162 *   node or a leaf.
5163 *
5164 *   If we are not at level 0 and not on shared tree blocks, go one
5165 *   slot to the right if possible or go up and right.
5166 *
5167 *   If we are not at level 0 and on shared tree blocks, go one slot to
5168 *   the right if possible or go up and right.
5169 */
5171 left_level = btrfs_header_level(left_root->commit_root);
5172 left_root_level = left_level;
5173 left_path->nodes[left_level] = left_root->commit_root;
5174 extent_buffer_get(left_path->nodes[left_level]);
5176 right_level = btrfs_header_level(right_root->commit_root);
5177 right_root_level = right_level;
5178 right_path->nodes[right_level] = right_root->commit_root;
5179 extent_buffer_get(right_path->nodes[right_level]);
5181 if (left_level == 0)
5182 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5183 &left_key, left_path->slots[left_level]);
5185 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5186 &left_key, left_path->slots[left_level]);
5187 if (right_level == 0)
5188 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5189 &right_key, right_path->slots[right_level]);
5191 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5192 &right_key, right_path->slots[right_level]);
5194 left_end_reached = right_end_reached = 0;
5195 advance_left = advance_right = 0;
5197 while (1) {
5198 /*
5199 * We need to make sure the transaction does not get committed
5200 * while we do anything on commit roots. This means we need to
5201 * join and leave transactions for every item that we process.
5202 */
5203 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5204 btrfs_release_path(left_path);
5205 btrfs_release_path(right_path);
5207 ret = btrfs_end_transaction(trans, left_root);
5212 /* now rejoin the transaction */
5214 trans = btrfs_join_transaction(left_root);
5215 if (IS_ERR(trans)) {
5216 ret = PTR_ERR(trans);
5221 spin_lock(&left_root->root_item_lock);
5222 ctransid = btrfs_root_ctransid(&left_root->root_item);
5223 spin_unlock(&left_root->root_item_lock);
5224 if (ctransid != left_start_ctransid)
5225 left_start_ctransid = 0;
5227 spin_lock(&right_root->root_item_lock);
5228 ctransid = btrfs_root_ctransid(&right_root->root_item);
5229 spin_unlock(&right_root->root_item_lock);
5230 if (ctransid != right_start_ctransid)
5231 right_start_ctransid = 0;
5233 if (!left_start_ctransid || !right_start_ctransid) {
5234 WARN(1, KERN_WARNING
5235 "btrfs: btrfs_compare_tree detected "
5236 "a change in one of the trees while "
5237 "iterating. This is probably a "
5238 "bug.\n");
5239 ret = -EIO;
5240 goto out;
5241 }
5243 /*
5244 * the commit root may have changed, so start again
5245 * where we stopped
5246 */
5247 left_path->lowest_level = left_level;
5248 right_path->lowest_level = right_level;
5249 ret = btrfs_search_slot(NULL, left_root,
5250 &left_key, left_path, 0, 0);
5253 ret = btrfs_search_slot(NULL, right_root,
5254 &right_key, right_path, 0, 0);
5259 if (advance_left && !left_end_reached) {
5260 ret = tree_advance(left_root, left_path, &left_level,
5262 advance_left != ADVANCE_ONLY_NEXT,
5265 left_end_reached = ADVANCE;
5268 if (advance_right && !right_end_reached) {
5269 ret = tree_advance(right_root, right_path, &right_level,
5271 advance_right != ADVANCE_ONLY_NEXT,
5274 right_end_reached = ADVANCE;
5278 if (left_end_reached && right_end_reached) {
5281 } else if (left_end_reached) {
5282 if (right_level == 0) {
5283 ret = changed_cb(left_root, right_root,
5284 left_path, right_path,
5286 BTRFS_COMPARE_TREE_DELETED,
5291 advance_right = ADVANCE;
5293 } else if (right_end_reached) {
5294 if (left_level == 0) {
5295 ret = changed_cb(left_root, right_root,
5296 left_path, right_path,
5298 BTRFS_COMPARE_TREE_NEW,
5303 advance_left = ADVANCE;
5307 if (left_level == 0 && right_level == 0) {
5308 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5310 ret = changed_cb(left_root, right_root,
5311 left_path, right_path,
5313 BTRFS_COMPARE_TREE_NEW,
5317 advance_left = ADVANCE;
5318 } else if (cmp > 0) {
5319 ret = changed_cb(left_root, right_root,
5320 left_path, right_path,
5322 BTRFS_COMPARE_TREE_DELETED,
5326 advance_right = ADVANCE;
5328 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5329 ret = tree_compare_item(left_root, left_path,
5330 right_path, tmp_buf);
5332 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5333 ret = changed_cb(left_root, right_root,
5334 left_path, right_path,
5336 BTRFS_COMPARE_TREE_CHANGED,
5341 advance_left = ADVANCE;
5342 advance_right = ADVANCE;
5344 } else if (left_level == right_level) {
5345 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5347 advance_left = ADVANCE;
5348 } else if (cmp > 0) {
5349 advance_right = ADVANCE;
5351 left_blockptr = btrfs_node_blockptr(
5352 left_path->nodes[left_level],
5353 left_path->slots[left_level]);
5354 right_blockptr = btrfs_node_blockptr(
5355 right_path->nodes[right_level],
5356 right_path->slots[right_level]);
5357 if (left_blockptr == right_blockptr) {
5358 /*
5359 * As we're on a shared block, don't
5360 * allow going any deeper.
5361 */
5362 advance_left = ADVANCE_ONLY_NEXT;
5363 advance_right = ADVANCE_ONLY_NEXT;
5365 advance_left = ADVANCE;
5366 advance_right = ADVANCE;
5369 } else if (left_level < right_level) {
5370 advance_right = ADVANCE;
5372 advance_left = ADVANCE;
5377 btrfs_free_path(left_path);
5378 btrfs_free_path(right_path);
5383 ret = btrfs_end_transaction(trans, left_root);
5385 btrfs_end_transaction(trans, left_root);
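/*
 * Illustrative sketch (not part of the original file): a minimal consumer
 * of btrfs_compare_trees().  The callback prototype is assumed from the
 * changed_cb() invocations above (key describes the new, deleted or changed
 * item depending on "result"); my_changed_cb and my_ctx are placeholders:
 *
 *	static int my_changed_cb(struct btrfs_root *left_root,
 *				 struct btrfs_root *right_root,
 *				 struct btrfs_path *left_path,
 *				 struct btrfs_path *right_path,
 *				 struct btrfs_key *key,
 *				 enum btrfs_compare_tree_result result,
 *				 void *ctx)
 *	{
 *		return 0;
 *	}
 *
 *	ret = btrfs_compare_trees(left_root, right_root, my_changed_cb,
 *				  my_ctx);
 */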
5391 /*
5392 * this is similar to btrfs_next_leaf, but does not try to preserve
5393 * and fix up the path. It looks for and returns the next key in the
5394 * tree based on the current path and the cache_only and min_trans
5395 * parameters.
5397 * 0 is returned if another key is found, < 0 if there are any errors
5398 * and 1 is returned if there are no higher keys in the tree
5400 * path->keep_locks should be set to 1 on the search made before
5401 * calling this function.
5402 */
5403 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5404 struct btrfs_key *key, int level,
5405 int cache_only, u64 min_trans)
5408 struct extent_buffer *c;
5410 WARN_ON(!path->keep_locks);
5411 while (level < BTRFS_MAX_LEVEL) {
5412 if (!path->nodes[level])
5415 slot = path->slots[level] + 1;
5416 c = path->nodes[level];
5418 if (slot >= btrfs_header_nritems(c)) {
5421 struct btrfs_key cur_key;
5422 if (level + 1 >= BTRFS_MAX_LEVEL ||
5423 !path->nodes[level + 1])
5426 if (path->locks[level + 1]) {
5431 slot = btrfs_header_nritems(c) - 1;
5433 btrfs_item_key_to_cpu(c, &cur_key, slot);
5435 btrfs_node_key_to_cpu(c, &cur_key, slot);
5437 orig_lowest = path->lowest_level;
5438 btrfs_release_path(path);
5439 path->lowest_level = level;
5440 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5442 path->lowest_level = orig_lowest;
5446 c = path->nodes[level];
5447 slot = path->slots[level];
5454 btrfs_item_key_to_cpu(c, key, slot);
5456 u64 blockptr = btrfs_node_blockptr(c, slot);
5457 u64 gen = btrfs_node_ptr_generation(c, slot);
5460 struct extent_buffer *cur;
5461 cur = btrfs_find_tree_block(root, blockptr,
5462 btrfs_level_size(root, level - 1));
5464 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
5467 free_extent_buffer(cur);
5470 free_extent_buffer(cur);
5472 if (gen < min_trans) {
5476 btrfs_node_key_to_cpu(c, key, slot);
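/*
 * Illustrative sketch (not part of the original file): resuming an
 * interrupted scan.  While the locks from the previous search are still
 * held (path->keep_locks was set), btrfs_find_next_key() computes the key
 * to restart from once the path has been dropped; the level, cache_only
 * and min_trans values are just one possible choice:
 *
 *	ret = btrfs_find_next_key(root, path, &key, 0, 0, 0);
 *	btrfs_release_path(path);
 *	if (ret)
 *		goto done;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 */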
5483 /*
5484 * search the tree again to find a leaf with greater keys
5485 * returns 0 if it found something or 1 if there are no greater leaves.
5486 * returns < 0 on io errors.
5487 */
5488 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5490 return btrfs_next_old_leaf(root, path, 0);
5491 }
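/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for iterating every item of a tree with btrfs_next_leaf(); process_item()
 * and the starting key are placeholders:
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		process_item(leaf, path->slots[0], &key);
 *		path->slots[0]++;
 *	}
 */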
5493 /* Release the path up to but not including the given level */
5494 static void btrfs_release_level(struct btrfs_path *path, int level)
5498 for (i = 0; i < level; i++) {
5500 if (!path->nodes[i])
5502 if (path->locks[i]) {
5503 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
5506 free_extent_buffer(path->nodes[i]);
5507 path->nodes[i] = NULL;
5511 /*
5512 * This function assumes 2 things
5513 *
5514 * 1) You are using path->keep_locks
5515 * 2) You are not inserting items.
5516 *
5517 * If either of these is not true do not use this function. If you need a next
5518 * leaf with either of these not being true then this function can be easily
5519 * adapted to do that, but at the moment these are the limitations.
5520 */
5521 int btrfs_next_leaf_write(struct btrfs_trans_handle *trans,
5522 struct btrfs_root *root, struct btrfs_path *path,
5523 int del)
5524 {
5525 struct extent_buffer *b;
5526 struct btrfs_key key;
5531 int write_lock_level = BTRFS_MAX_LEVEL;
5532 int ins_len = del ? -1 : 0;
5534 WARN_ON(!(path->keep_locks || path->really_keep_locks));
5536 nritems = btrfs_header_nritems(path->nodes[0]);
5537 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5539 while (path->nodes[level]) {
5540 nritems = btrfs_header_nritems(path->nodes[level]);
5541 if (!(path->locks[level] & BTRFS_WRITE_LOCK)) {
5543 btrfs_release_path(path);
5544 ret = btrfs_search_slot(trans, root, &key, path,
5552 if (path->slots[level] >= nritems - 1) {
5557 btrfs_release_level(path, level);
5561 if (!path->nodes[level]) {
5566 path->slots[level]++;
5567 b = path->nodes[level];
5570 level = btrfs_header_level(b);
5572 if (!should_cow_block(trans, root, b))
5575 btrfs_set_path_blocking(path);
5576 ret = btrfs_cow_block(trans, root, b,
5577 path->nodes[level + 1],
5578 path->slots[level + 1], &b);
5582 path->nodes[level] = b;
5583 btrfs_clear_path_blocking(path, NULL, 0);
5585 ret = setup_nodes_for_search(trans, root, path, b,
5593 b = path->nodes[level];
5594 slot = path->slots[level];
5596 ret = read_block_for_search(trans, root, path,
5597 &b, level, slot, &key, 0);
5602 level = btrfs_header_level(b);
5603 if (!btrfs_try_tree_write_lock(b)) {
5604 btrfs_set_path_blocking(path);
5606 btrfs_clear_path_blocking(path, b,
5609 path->locks[level] = BTRFS_WRITE_LOCK;
5610 path->nodes[level] = b;
5611 path->slots[level] = 0;
5613 path->slots[level] = 0;
5621 btrfs_release_path(path);
5626 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5627 u64 time_seq)
5628 {
5629 int slot;
5630 int level;
5631 struct extent_buffer *c;
5632 struct extent_buffer *next;
5633 struct btrfs_key key;
5634 u32 nritems;
5635 int ret;
5636 int old_spinning = path->leave_spinning;
5637 int next_rw_lock = 0;
5639 nritems = btrfs_header_nritems(path->nodes[0]);
5643 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5648 btrfs_release_path(path);
5650 path->keep_locks = 1;
5651 path->leave_spinning = 1;
5654 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5656 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5657 path->keep_locks = 0;
5662 nritems = btrfs_header_nritems(path->nodes[0]);
5663 /*
5664 * by releasing the path above we dropped all our locks. A balance
5665 * could have added more items next to the key that used to be
5666 * at the very end of the block. So, check again here and
5667 * advance the path if there are now more items available.
5668 */
5669 if (nritems > 0 && path->slots[0] < nritems - 1) {
5676 while (level < BTRFS_MAX_LEVEL) {
5677 if (!path->nodes[level]) {
5682 slot = path->slots[level] + 1;
5683 c = path->nodes[level];
5684 if (slot >= btrfs_header_nritems(c)) {
5686 if (level == BTRFS_MAX_LEVEL) {
5694 btrfs_tree_unlock_rw(next, next_rw_lock);
5695 free_extent_buffer(next);
5699 next_rw_lock = path->locks[level];
5700 ret = read_block_for_search(NULL, root, path, &next, level,
5706 btrfs_release_path(path);
5710 if (!path->skip_locking) {
5711 ret = btrfs_try_tree_read_lock(next);
5712 if (!ret && time_seq) {
5713 /*
5714 * If we don't get the lock, we may be racing
5715 * with push_leaf_left, which holds that lock
5716 * while itself waiting for the leaf we have
5717 * currently locked. To solve this situation,
5718 * we give up on our lock and cycle.
5719 */
5720 free_extent_buffer(next);
5721 btrfs_release_path(path);
5726 btrfs_set_path_blocking(path);
5727 btrfs_tree_read_lock(next);
5728 btrfs_clear_path_blocking(path, next,
5731 next_rw_lock = BTRFS_READ_LOCK;
5735 path->slots[level] = slot;
5738 c = path->nodes[level];
5739 if (path->locks[level])
5740 btrfs_tree_unlock_rw(c, path->locks[level]);
5742 free_extent_buffer(c);
5743 path->nodes[level] = next;
5744 path->slots[level] = 0;
5745 if (!path->skip_locking)
5746 path->locks[level] = next_rw_lock;
5750 ret = read_block_for_search(NULL, root, path, &next, level,
5756 btrfs_release_path(path);
5760 if (!path->skip_locking) {
5761 ret = btrfs_try_tree_read_lock(next);
5763 btrfs_set_path_blocking(path);
5764 btrfs_tree_read_lock(next);
5765 btrfs_clear_path_blocking(path, next,
5768 next_rw_lock = BTRFS_READ_LOCK;
5773 unlock_up(path, 0, 1, 0, NULL);
5774 path->leave_spinning = old_spinning;
5776 btrfs_set_path_blocking(path);
5781 /*
5782 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5783 * searching until it gets past min_objectid or finds an item of 'type'
5784 *
5785 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5786 */
5787 int btrfs_previous_item(struct btrfs_root *root,
5788 struct btrfs_path *path, u64 min_objectid,
5789 int type)
5790 {
5791 struct btrfs_key found_key;
5792 struct extent_buffer *leaf;
5797 if (path->slots[0] == 0) {
5798 btrfs_set_path_blocking(path);
5799 ret = btrfs_prev_leaf(root, path);
5805 leaf = path->nodes[0];
5806 nritems = btrfs_header_nritems(leaf);
5809 if (path->slots[0] == nritems)
5812 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5813 if (found_key.objectid < min_objectid)
5815 if (found_key.type == type)
5817 if (found_key.objectid == min_objectid &&
5818 found_key.type < type)