2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
41 struct btrfs_path *path, int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
44 struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
45 u32 blocksize, u64 parent_transid,
47 struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
48 u64 bytenr, u32 blocksize,
51 struct btrfs_path *btrfs_alloc_path(void)
53 struct btrfs_path *path;
54 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
59 * set all locked nodes in the path to blocking locks. This should
60 * be done before scheduling
62 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
65 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
66 if (!p->nodes[i] || !p->locks[i])
68 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
69 if (p->locks[i] == BTRFS_READ_LOCK)
70 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
71 else if (p->locks[i] == BTRFS_WRITE_LOCK)
72 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
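/*
 * Editor's illustrative sketch, not part of the original file: the usual
 * pairing of btrfs_set_path_blocking() with btrfs_clear_path_blocking()
 * around work that may sleep. The wrapper name is hypothetical; the two
 * btrfs helpers are the real ones defined in this file.
 */
static void example_blocking_section(struct btrfs_path *path)
{
	/* convert all spinning locks in the path to blocking locks */
	btrfs_set_path_blocking(path);

	/* ... work that may schedule, e.g. reading a block from disk ... */

	/* retake spinning locks now that we are done sleeping */
	btrfs_clear_path_blocking(path, NULL, 0);
}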
77 * reset all the locked nodes in the path to spinning locks.
79 * held is used to keep lockdep happy, when lockdep is enabled
80 * we set held to a blocking lock before we go around and
81 * retake all the spinlocks in the path. You can safely use NULL for held.
84 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
85 struct extent_buffer *held, int held_rw)
89 #ifdef CONFIG_DEBUG_LOCK_ALLOC
90 /* lockdep really cares that we take all of these spinlocks
91 * in the right order. If any of the locks in the path are not
92 * currently blocking, it is going to complain. So, make really
93 * really sure by forcing the path to blocking before we clear the path blocking.
97 btrfs_set_lock_blocking_rw(held, held_rw);
98 if (held_rw == BTRFS_WRITE_LOCK)
99 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
100 else if (held_rw == BTRFS_READ_LOCK)
101 held_rw = BTRFS_READ_LOCK_BLOCKING;
103 btrfs_set_path_blocking(p);
106 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
107 if (p->nodes[i] && p->locks[i]) {
108 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
109 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
110 p->locks[i] = BTRFS_WRITE_LOCK;
111 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
112 p->locks[i] = BTRFS_READ_LOCK;
116 #ifdef CONFIG_DEBUG_LOCK_ALLOC
118 btrfs_clear_lock_blocking_rw(held, held_rw);
122 /* this also releases the path */
123 void btrfs_free_path(struct btrfs_path *p)
127 btrfs_release_path(p);
128 kmem_cache_free(btrfs_path_cachep, p);
132 * path release drops references on the extent buffers in the path
133 * and it drops any locks held by this path
135 * It is safe to call this on paths that have no locks or extent buffers held.
137 noinline void btrfs_release_path(struct btrfs_path *p)
141 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
146 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
149 free_extent_buffer(p->nodes[i]);
155 * safely gets a reference on the root node of a tree. A lock
156 * is not taken, so a concurrent writer may put a different node
157 * at the root of the tree. See btrfs_lock_root_node for the looping required.
160 * The extent buffer returned by this has a reference taken, so
161 * it won't disappear. It may stop being the root of the tree
162 * at any time because there are no locks held.
164 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
166 struct extent_buffer *eb;
170 eb = rcu_dereference(root->node);
173 * RCU really hurts here, we could free up the root node because
174 * it was cow'ed but we may not get the new root node yet so do
175 * the inc_not_zero dance and if it doesn't work then
176 * synchronize_rcu and try again.
178 if (atomic_inc_not_zero(&eb->refs)) {
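		/*
		 * Editor's note, sketching the elided remainder of this loop
		 * from the comment above (an assumption, not the original
		 * source): on success we rcu_read_unlock() and break out with
		 * the reference held; if the count already hit zero, we
		 * rcu_read_unlock(), synchronize_rcu() to wait out the
		 * concurrent COW, and retry the rcu_dereference() from the
		 * top.
		 */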
188 /* loop around taking references on and locking the root node of the
189 * tree until you end up with a lock on the root. A locked buffer
190 * is returned, with a reference held.
192 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
194 struct extent_buffer *eb;
197 eb = btrfs_root_node(root);
199 if (eb == root->node)
201 btrfs_tree_unlock(eb);
202 free_extent_buffer(eb);
207 /* loop around taking references on and locking the root node of the
208 * tree until you end up with a lock on the root. A locked buffer
209 * is returned, with a reference held.
211 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
213 struct extent_buffer *eb;
216 eb = btrfs_root_node(root);
217 btrfs_tree_read_lock(eb);
218 if (eb == root->node)
220 btrfs_tree_read_unlock(eb);
221 free_extent_buffer(eb);
226 /* cowonly roots (everything not a reference counted cow subvolume) just get
227 * put onto a simple dirty list. transaction.c walks this to make sure they
228 * get properly updated on disk.
230 static void add_root_to_dirty_list(struct btrfs_root *root)
232 spin_lock(&root->fs_info->trans_lock);
233 if (root->track_dirty && list_empty(&root->dirty_list)) {
234 list_add(&root->dirty_list,
235 &root->fs_info->dirty_cowonly_roots);
237 spin_unlock(&root->fs_info->trans_lock);
241 * used by snapshot creation to make a copy of a root for a tree with
242 * a given objectid. The buffer with the new root node is returned in
243 * cow_ret, and this func returns zero on success or a negative error code.
245 int btrfs_copy_root(struct btrfs_trans_handle *trans,
246 struct btrfs_root *root,
247 struct extent_buffer *buf,
248 struct extent_buffer **cow_ret, u64 new_root_objectid)
250 struct extent_buffer *cow;
253 struct btrfs_disk_key disk_key;
255 WARN_ON(root->ref_cows && trans->transid !=
256 root->fs_info->running_transaction->transid);
257 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
259 level = btrfs_header_level(buf);
261 btrfs_item_key(buf, &disk_key, 0);
263 btrfs_node_key(buf, &disk_key, 0);
265 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
266 new_root_objectid, &disk_key, level,
271 copy_extent_buffer(cow, buf, 0, 0, cow->len);
272 btrfs_set_header_bytenr(cow, cow->start);
273 btrfs_set_header_generation(cow, trans->transid);
274 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
275 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
276 BTRFS_HEADER_FLAG_RELOC);
277 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
278 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
280 btrfs_set_header_owner(cow, new_root_objectid);
282 write_extent_buffer(cow, root->fs_info->fsid,
283 (unsigned long)btrfs_header_fsid(cow),
286 WARN_ON(btrfs_header_generation(buf) > trans->transid);
287 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
288 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
290 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
295 btrfs_mark_buffer_dirty(cow);
304 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
305 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
307 MOD_LOG_ROOT_REPLACE,
310 struct tree_mod_move {
315 struct tree_mod_root {
320 struct tree_mod_elem {
322 u64 index; /* shifted logical */
326 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
329 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
332 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
333 struct btrfs_disk_key key;
336 /* this is used for op == MOD_LOG_MOVE_KEYS */
337 struct tree_mod_move move;
339 /* this is used for op == MOD_LOG_ROOT_REPLACE */
340 struct tree_mod_root old_root;
343 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
345 read_lock(&fs_info->tree_mod_log_lock);
348 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
350 read_unlock(&fs_info->tree_mod_log_lock);
353 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
355 write_lock(&fs_info->tree_mod_log_lock);
358 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
360 write_unlock(&fs_info->tree_mod_log_lock);
364 * This adds a new blocker to the tree mod log's blocker list if the @elem
365 * passed does not already have a sequence number set. So when a caller expects
366 * to record tree modifications, it should set elem->seq to zero
367 * before calling btrfs_get_tree_mod_seq.
368 * Returns a fresh, unused tree log modification sequence number, even if no new blocker was added.
371 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
372 struct seq_list *elem)
376 tree_mod_log_write_lock(fs_info);
377 spin_lock(&fs_info->tree_mod_seq_lock);
379 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
380 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
382 seq = btrfs_inc_tree_mod_seq(fs_info);
383 spin_unlock(&fs_info->tree_mod_seq_lock);
384 tree_mod_log_write_unlock(fs_info);
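/*
 * Editor's usage sketch (hypothetical caller, error handling elided): as
 * the comment above says, elem->seq must be zero going in so a blocker is
 * registered, and the get is paired with btrfs_put_tree_mod_seq() once the
 * caller no longer needs old log entries preserved.
 */
static void example_tree_mod_seq_user(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = {};	/* elem.seq == 0 registers a blocker */
	u64 seq;

	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... rewind tree state against sequence number 'seq' ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);
}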
389 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
390 struct seq_list *elem)
392 struct rb_root *tm_root;
393 struct rb_node *node;
394 struct rb_node *next;
395 struct seq_list *cur_elem;
396 struct tree_mod_elem *tm;
397 u64 min_seq = (u64)-1;
398 u64 seq_putting = elem->seq;
403 spin_lock(&fs_info->tree_mod_seq_lock);
404 list_del(&elem->list);
407 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
408 if (cur_elem->seq < min_seq) {
409 if (seq_putting > cur_elem->seq) {
411 * blocker with lower sequence number exists, we
412 * cannot remove anything from the log
414 spin_unlock(&fs_info->tree_mod_seq_lock);
417 min_seq = cur_elem->seq;
420 spin_unlock(&fs_info->tree_mod_seq_lock);
423 * anything that's lower than the lowest existing (read: blocked)
424 * sequence number can be removed from the tree.
426 tree_mod_log_write_lock(fs_info);
427 tm_root = &fs_info->tree_mod_log;
428 for (node = rb_first(tm_root); node; node = next) {
429 next = rb_next(node);
430 tm = container_of(node, struct tree_mod_elem, node);
431 if (tm->seq > min_seq)
433 rb_erase(node, tm_root);
436 tree_mod_log_write_unlock(fs_info);
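/*
 * Editor's worked example for the pruning above (hypothetical numbers):
 * with blockers at seq 5 and 9, putting 9 first removes nothing, because
 * the older blocker 5 still pins the log; putting 5 instead leaves
 * min_seq = 9 and erases every logged element with seq <= 9. Once the
 * list is empty, min_seq stays (u64)-1 and the whole log is dropped.
 */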
440 * key order of the log:
443 * the index is the shifted logical of the *new* root node for root replace
444 * operations, or the shifted logical of the affected block for all other operations.
448 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
450 struct rb_root *tm_root;
451 struct rb_node **new;
452 struct rb_node *parent = NULL;
453 struct tree_mod_elem *cur;
455 BUG_ON(!tm || !tm->seq);
457 tm_root = &fs_info->tree_mod_log;
458 new = &tm_root->rb_node;
460 cur = container_of(*new, struct tree_mod_elem, node);
462 if (cur->index < tm->index)
463 new = &((*new)->rb_left);
464 else if (cur->index > tm->index)
465 new = &((*new)->rb_right);
466 else if (cur->seq < tm->seq)
467 new = &((*new)->rb_left);
468 else if (cur->seq > tm->seq)
469 new = &((*new)->rb_right);
476 rb_link_node(&tm->node, parent, new);
477 rb_insert_color(&tm->node, tm_root);
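/*
 * Editor's illustrative helper (hypothetical, not in the original file):
 * the effective ordering of the walk above expressed as one comparator.
 * Note the tree is kept in descending (index, seq) order, matching the
 * left/right choices made in __tree_mod_log_insert and
 * __tree_mod_log_search.
 */
static int tree_mod_elem_cmp(const struct tree_mod_elem *a,
			     const struct tree_mod_elem *b)
{
	if (a->index != b->index)
		return a->index > b->index ? -1 : 1;
	if (a->seq != b->seq)
		return a->seq > b->seq ? -1 : 1;
	return 0;	/* duplicate (index, seq) pairs are never inserted */
}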
482 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
483 * returns zero with the tree_mod_log_lock acquired. The caller must hold
484 * this until all tree mod log insertions are recorded in the rb tree and then
485 * call tree_mod_log_write_unlock() to release.
487 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
488 struct extent_buffer *eb) {
490 if (list_empty(&(fs_info)->tree_mod_seq_list))
492 if (eb && btrfs_header_level(eb) == 0)
495 tree_mod_log_write_lock(fs_info);
496 if (list_empty(&fs_info->tree_mod_seq_list)) {
498 * someone emptied the list while we were waiting for the lock.
499 * we must not add to the list when no blocker exists.
501 tree_mod_log_write_unlock(fs_info);
509 * This allocates memory and gets a tree modification sequence number.
511 * Returns <0 on error.
512 * Returns >0 (the added sequence number) on success.
514 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
515 struct tree_mod_elem **tm_ret)
517 struct tree_mod_elem *tm;
520 * once we switch from spin locks to something different, we should
521 * honor the flags parameter here.
523 tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
527 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
532 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
533 struct extent_buffer *eb, int slot,
534 enum mod_log_op op, gfp_t flags)
537 struct tree_mod_elem *tm;
539 ret = tree_mod_alloc(fs_info, flags, &tm);
543 tm->index = eb->start >> PAGE_CACHE_SHIFT;
544 if (op != MOD_LOG_KEY_ADD) {
545 btrfs_node_key(eb, &tm->key, slot);
546 tm->blockptr = btrfs_node_blockptr(eb, slot);
550 tm->generation = btrfs_node_ptr_generation(eb, slot);
552 return __tree_mod_log_insert(fs_info, tm);
556 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
557 struct extent_buffer *eb, int slot,
558 enum mod_log_op op, gfp_t flags)
562 if (tree_mod_dont_log(fs_info, eb))
565 ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
567 tree_mod_log_write_unlock(fs_info);
572 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
573 int slot, enum mod_log_op op)
575 return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
579 tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
580 struct extent_buffer *eb, int slot,
583 return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
587 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
588 struct extent_buffer *eb, int dst_slot, int src_slot,
589 int nr_items, gfp_t flags)
591 struct tree_mod_elem *tm;
595 if (tree_mod_dont_log(fs_info, eb))
599 * When we overwrite something during the move, we log these removals.
600 * This can only happen when we move towards the beginning of the
601 * buffer, i.e. dst_slot < src_slot.
603 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
604 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
605 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
609 ret = tree_mod_alloc(fs_info, flags, &tm);
613 tm->index = eb->start >> PAGE_CACHE_SHIFT;
615 tm->move.dst_slot = dst_slot;
616 tm->move.nr_items = nr_items;
617 tm->op = MOD_LOG_MOVE_KEYS;
619 ret = __tree_mod_log_insert(fs_info, tm);
621 tree_mod_log_write_unlock(fs_info);
626 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
632 if (btrfs_header_level(eb) == 0)
635 nritems = btrfs_header_nritems(eb);
636 for (i = nritems - 1; i >= 0; i--) {
637 ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
638 MOD_LOG_KEY_REMOVE_WHILE_FREEING);
644 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
645 struct extent_buffer *old_root,
646 struct extent_buffer *new_root, gfp_t flags,
649 struct tree_mod_elem *tm;
652 if (tree_mod_dont_log(fs_info, NULL))
656 __tree_mod_log_free_eb(fs_info, old_root);
658 ret = tree_mod_alloc(fs_info, flags, &tm);
662 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
663 tm->old_root.logical = old_root->start;
664 tm->old_root.level = btrfs_header_level(old_root);
665 tm->generation = btrfs_header_generation(old_root);
666 tm->op = MOD_LOG_ROOT_REPLACE;
668 ret = __tree_mod_log_insert(fs_info, tm);
670 tree_mod_log_write_unlock(fs_info);
674 static struct tree_mod_elem *
675 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
678 struct rb_root *tm_root;
679 struct rb_node *node;
680 struct tree_mod_elem *cur = NULL;
681 struct tree_mod_elem *found = NULL;
682 u64 index = start >> PAGE_CACHE_SHIFT;
684 tree_mod_log_read_lock(fs_info);
685 tm_root = &fs_info->tree_mod_log;
686 node = tm_root->rb_node;
688 cur = container_of(node, struct tree_mod_elem, node);
689 if (cur->index < index) {
690 node = node->rb_left;
691 } else if (cur->index > index) {
692 node = node->rb_right;
693 } else if (cur->seq < min_seq) {
694 node = node->rb_left;
695 } else if (!smallest) {
696 /* we want the node with the highest seq */
698 BUG_ON(found->seq > cur->seq);
700 node = node->rb_left;
701 } else if (cur->seq > min_seq) {
702 /* we want the node with the smallest seq */
704 BUG_ON(found->seq < cur->seq);
706 node = node->rb_right;
712 tree_mod_log_read_unlock(fs_info);
718 * this returns the element from the log with the smallest time sequence
719 * value that's in the log (the oldest log item). any element with a time
720 * sequence lower than min_seq will be ignored.
722 static struct tree_mod_elem *
723 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
726 return __tree_mod_log_search(fs_info, start, min_seq, 1);
730 * this returns the element from the log with the largest time sequence
731 * value that's in the log (the most recent log item). any element with
732 * a time sequence lower than min_seq will be ignored.
734 static struct tree_mod_elem *
735 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
737 return __tree_mod_log_search(fs_info, start, min_seq, 0);
741 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
742 struct extent_buffer *src, unsigned long dst_offset,
743 unsigned long src_offset, int nr_items)
748 if (tree_mod_dont_log(fs_info, NULL))
751 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
752 tree_mod_log_write_unlock(fs_info);
756 for (i = 0; i < nr_items; i++) {
757 ret = tree_mod_log_insert_key_locked(fs_info, src,
761 ret = tree_mod_log_insert_key_locked(fs_info, dst,
767 tree_mod_log_write_unlock(fs_info);
771 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
772 int dst_offset, int src_offset, int nr_items)
775 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
781 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
782 struct extent_buffer *eb, int slot, int atomic)
786 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
788 atomic ? GFP_ATOMIC : GFP_NOFS);
793 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
795 if (tree_mod_dont_log(fs_info, eb))
798 __tree_mod_log_free_eb(fs_info, eb);
800 tree_mod_log_write_unlock(fs_info);
804 tree_mod_log_set_root_pointer(struct btrfs_root *root,
805 struct extent_buffer *new_root_node,
809 ret = tree_mod_log_insert_root(root->fs_info, root->node,
810 new_root_node, GFP_NOFS, log_removal);
815 * check if the tree block can be shared by multiple trees
817 int btrfs_block_can_be_shared(struct btrfs_root *root,
818 struct extent_buffer *buf)
821 * Tree blocks not in reference counted trees and tree roots
822 * are never shared. If a block was allocated after the last
823 * snapshot and the block was not allocated by tree relocation,
824 * we know the block is not shared.
826 if (root->ref_cows &&
827 buf != root->node && buf != root->commit_root &&
828 (btrfs_header_generation(buf) <=
829 btrfs_root_last_snapshot(&root->root_item) ||
830 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
832 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
833 if (root->ref_cows &&
834 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
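/*
 * Editor's worked example (hypothetical numbers): in a reference counted
 * tree whose last snapshot was taken at generation 60, a block with
 * header generation 50 predates the snapshot and may still be shared
 * with it, so btrfs_block_can_be_shared() returns 1; a block created at
 * generation 70 (and not flagged BTRFS_HEADER_FLAG_RELOC) cannot be
 * shared and returns 0.
 */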
840 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
841 struct btrfs_root *root,
842 struct extent_buffer *buf,
843 struct extent_buffer *cow,
853 * Backrefs update rules:
855 * Always use full backrefs for extent pointers in tree block
856 * allocated by tree relocation.
858 * If a shared tree block is no longer referenced by its owner
859 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
860 * use full backrefs for extent pointers in tree block.
862 * If a tree block is being relocated
863 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
864 * use full backrefs for extent pointers in tree block.
865 * The reason for this is some operations (such as drop tree)
866 * are only allowed for blocks that use full backrefs.
869 if (btrfs_block_can_be_shared(root, buf)) {
870 ret = btrfs_lookup_extent_info(trans, root, buf->start,
871 btrfs_header_level(buf), 1,
877 btrfs_std_error(root->fs_info, ret);
882 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
883 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
884 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
889 owner = btrfs_header_owner(buf);
890 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
891 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
894 if ((owner == root->root_key.objectid ||
895 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
896 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
897 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
898 BUG_ON(ret); /* -ENOMEM */
900 if (root->root_key.objectid ==
901 BTRFS_TREE_RELOC_OBJECTID) {
902 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
903 BUG_ON(ret); /* -ENOMEM */
904 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
905 BUG_ON(ret); /* -ENOMEM */
907 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
910 if (root->root_key.objectid ==
911 BTRFS_TREE_RELOC_OBJECTID)
912 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
914 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
915 BUG_ON(ret); /* -ENOMEM */
917 if (new_flags != 0) {
918 ret = btrfs_set_disk_extent_flags(trans, root,
926 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
927 if (root->root_key.objectid ==
928 BTRFS_TREE_RELOC_OBJECTID)
929 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
931 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
932 BUG_ON(ret); /* -ENOMEM */
933 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
934 BUG_ON(ret); /* -ENOMEM */
936 clean_tree_block(trans, root, buf);
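/*
 * Editor's rough summary of the code above: while the old block is still
 * shared (refs > 1), references for its children are added on behalf of
 * the new copy, switching to FULL_BACKREF where the rules above demand
 * it; when ours was the last reference and the block carried
 * FULL_BACKREF, the child references are migrated to the new copy and
 * the old block is cleaned with clean_tree_block().
 */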
943 * does the dirty work in cow of a single block. The parent block (if
944 * supplied) is updated to point to the new cow copy. The new buffer is marked
945 * dirty and returned locked. If you modify the block it needs to be marked dirty again.
948 * search_start -- an allocation hint for the new block
950 * empty_size -- a hint that you plan on doing more cow. This is the size in
951 * bytes the allocator should try to find free next to the block it returns.
952 * This is just a hint and may be ignored by the allocator.
954 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
955 struct btrfs_root *root,
956 struct extent_buffer *buf,
957 struct extent_buffer *parent, int parent_slot,
958 struct extent_buffer **cow_ret,
959 u64 search_start, u64 empty_size)
961 struct btrfs_disk_key disk_key;
962 struct extent_buffer *cow;
971 btrfs_assert_tree_locked(buf);
973 WARN_ON(root->ref_cows && trans->transid !=
974 root->fs_info->running_transaction->transid);
975 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
977 level = btrfs_header_level(buf);
980 btrfs_item_key(buf, &disk_key, 0);
982 btrfs_node_key(buf, &disk_key, 0);
984 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
986 parent_start = parent->start;
992 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
993 root->root_key.objectid, &disk_key,
994 level, search_start, empty_size);
998 /* cow is set to blocking by btrfs_init_new_buffer */
1000 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1001 btrfs_set_header_bytenr(cow, cow->start);
1002 btrfs_set_header_generation(cow, trans->transid);
1003 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1004 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1005 BTRFS_HEADER_FLAG_RELOC);
1006 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1007 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1009 btrfs_set_header_owner(cow, root->root_key.objectid);
1011 write_extent_buffer(cow, root->fs_info->fsid,
1012 (unsigned long)btrfs_header_fsid(cow),
1015 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1017 btrfs_abort_transaction(trans, root, ret);
1022 btrfs_reloc_cow_block(trans, root, buf, cow);
1024 if (buf == root->node) {
1025 WARN_ON(parent && parent != buf);
1026 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1027 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1028 parent_start = buf->start;
1032 extent_buffer_get(cow);
1033 tree_mod_log_set_root_pointer(root, cow, 1);
1034 rcu_assign_pointer(root->node, cow);
1036 btrfs_free_tree_block(trans, root, buf, parent_start,
1038 free_extent_buffer(buf);
1039 add_root_to_dirty_list(root);
1041 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1042 parent_start = parent->start;
1046 WARN_ON(trans->transid != btrfs_header_generation(parent));
1047 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1048 MOD_LOG_KEY_REPLACE);
1049 btrfs_set_node_blockptr(parent, parent_slot,
1051 btrfs_set_node_ptr_generation(parent, parent_slot,
1053 btrfs_mark_buffer_dirty(parent);
1054 tree_mod_log_free_eb(root->fs_info, buf);
1055 btrfs_free_tree_block(trans, root, buf, parent_start,
1059 btrfs_tree_unlock(buf);
1060 free_extent_buffer_stale(buf);
1061 btrfs_mark_buffer_dirty(cow);
1067 * returns the logical address of the oldest predecessor of the given root.
1068 * entries older than time_seq are ignored.
1070 static struct tree_mod_elem *
1071 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1072 struct extent_buffer *eb_root, u64 time_seq)
1074 struct tree_mod_elem *tm;
1075 struct tree_mod_elem *found = NULL;
1076 u64 root_logical = eb_root->start;
1083 * the very last operation that's logged for a root is the replacement
1084 * operation (if it is replaced at all). this has the index of the *new*
1085 * root, making it the very first operation that's logged for this root.
1088 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1093 * if there are no tree operations for the oldest root, we simply
1094 * return it. this should only happen if that (old) root is at level 0.
1101 * if there's an operation that's not a root replacement, we
1102 * found the oldest version of our root. normally, we'll find a
1103 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1105 if (tm->op != MOD_LOG_ROOT_REPLACE)
1109 root_logical = tm->old_root.logical;
1113 /* if there's no old root to return, return what we found instead */
1121 * tm is a pointer to the first operation to rewind within eb. then, all
1122 * previous operations will be rewound (until we reach something older than time_seq).
1126 __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
1127 struct tree_mod_elem *first_tm)
1130 struct rb_node *next;
1131 struct tree_mod_elem *tm = first_tm;
1132 unsigned long o_dst;
1133 unsigned long o_src;
1134 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1136 n = btrfs_header_nritems(eb);
1137 while (tm && tm->seq >= time_seq) {
1139 * all the operations are recorded with the operator used for
1140 * the modification. as we're going backwards, we do the
1141 * opposite of each operation here.
1144 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1145 BUG_ON(tm->slot < n);
1147 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1148 case MOD_LOG_KEY_REMOVE:
1149 btrfs_set_node_key(eb, &tm->key, tm->slot);
1150 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1151 btrfs_set_node_ptr_generation(eb, tm->slot,
1155 case MOD_LOG_KEY_REPLACE:
1156 BUG_ON(tm->slot >= n);
1157 btrfs_set_node_key(eb, &tm->key, tm->slot);
1158 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1159 btrfs_set_node_ptr_generation(eb, tm->slot,
1162 case MOD_LOG_KEY_ADD:
1163 /* if a move operation is needed it's in the log */
1166 case MOD_LOG_MOVE_KEYS:
1167 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1168 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1169 memmove_extent_buffer(eb, o_dst, o_src,
1170 tm->move.nr_items * p_size);
1172 case MOD_LOG_ROOT_REPLACE:
1174 * this operation is special. for roots, this must be
1175 * handled explicitly before rewinding.
1176 * for non-roots, this operation may exist if the node
1177 * was a root: root A -> child B; then A gets empty and
1178 * B is promoted to the new root. in the mod log, we'll
1179 * have a root-replace operation for B, a tree block
1180 * that is not a root. we simply ignore that operation.
1184 next = rb_next(&tm->node);
1187 tm = container_of(next, struct tree_mod_elem, node);
1188 if (tm->index != first_tm->index)
1191 btrfs_set_header_nritems(eb, n);
1195 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1196 * is returned. If rewind operations happen, a fresh buffer is returned. The
1197 * returned buffer is always read-locked. If the returned buffer is not the
1198 * input buffer, the lock on the input buffer is released and the input buffer
1199 * is freed (its refcount is decremented).
1201 static struct extent_buffer *
1202 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1205 struct extent_buffer *eb_rewin;
1206 struct tree_mod_elem *tm;
1211 if (btrfs_header_level(eb) == 0)
1214 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1218 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1219 BUG_ON(tm->slot != 0);
1220 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1221 fs_info->tree_root->nodesize);
1223 btrfs_set_header_bytenr(eb_rewin, eb->start);
1224 btrfs_set_header_backref_rev(eb_rewin,
1225 btrfs_header_backref_rev(eb));
1226 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1227 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1229 eb_rewin = btrfs_clone_extent_buffer(eb);
1233 extent_buffer_get(eb_rewin);
1234 btrfs_tree_read_unlock(eb);
1235 free_extent_buffer(eb);
1237 extent_buffer_get(eb_rewin);
1238 btrfs_tree_read_lock(eb_rewin);
1239 __tree_mod_log_rewind(eb_rewin, time_seq, tm);
1240 WARN_ON(btrfs_header_nritems(eb_rewin) >
1241 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1247 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1248 * value. If there are no changes, the current root->root_node is returned. If
1249 * anything changed in between, there's a fresh buffer allocated on which the
1250 * rewind operations are done. In any case, the returned buffer is read locked.
1251 * Returns NULL on error (with no locks held).
1253 static inline struct extent_buffer *
1254 get_old_root(struct btrfs_root *root, u64 time_seq)
1256 struct tree_mod_elem *tm;
1257 struct extent_buffer *eb = NULL;
1258 struct extent_buffer *eb_root;
1259 struct extent_buffer *old;
1260 struct tree_mod_root *old_root = NULL;
1261 u64 old_generation = 0;
1265 eb_root = btrfs_read_lock_root_node(root);
1266 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1270 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1271 old_root = &tm->old_root;
1272 old_generation = tm->generation;
1273 logical = old_root->logical;
1275 logical = eb_root->start;
1278 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1279 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1280 btrfs_tree_read_unlock(eb_root);
1281 free_extent_buffer(eb_root);
1282 blocksize = btrfs_level_size(root, old_root->level);
1283 old = read_tree_block(root, logical, blocksize, 0);
1285 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1289 eb = btrfs_clone_extent_buffer(old);
1290 free_extent_buffer(old);
1292 } else if (old_root) {
1293 btrfs_tree_read_unlock(eb_root);
1294 free_extent_buffer(eb_root);
1295 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1297 eb = btrfs_clone_extent_buffer(eb_root);
1298 btrfs_tree_read_unlock(eb_root);
1299 free_extent_buffer(eb_root);
1304 extent_buffer_get(eb);
1305 btrfs_tree_read_lock(eb);
1307 btrfs_set_header_bytenr(eb, eb->start);
1308 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1309 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1310 btrfs_set_header_level(eb, old_root->level);
1311 btrfs_set_header_generation(eb, old_generation);
1314 __tree_mod_log_rewind(eb, time_seq, tm);
1316 WARN_ON(btrfs_header_level(eb) != 0);
1317 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1322 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1324 struct tree_mod_elem *tm;
1326 struct extent_buffer *eb_root = btrfs_root_node(root);
1328 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1329 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1330 level = tm->old_root.level;
1332 level = btrfs_header_level(eb_root);
1334 free_extent_buffer(eb_root);
1339 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1340 struct btrfs_root *root,
1341 struct extent_buffer *buf)
1343 /* ensure we can see the force_cow */
1347 * We do not need to cow a block if
1348 * 1) this block is not created or changed in this transaction;
1349 * 2) this block does not belong to TREE_RELOC tree;
1350 * 3) the root is not forced COW.
1352 * What is forced COW:
1353 * when we create a snapshot while committing the transaction,
1354 * after we've finished copying the src root, we must COW the shared
1355 * block to ensure the metadata consistency.
1357 if (btrfs_header_generation(buf) == trans->transid &&
1358 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1359 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1360 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1367 * cows a single block, see __btrfs_cow_block for the real work.
1368 * This version of it has extra checks so that a block isn't cow'd more than
1369 * once per transaction, as long as it hasn't been written yet
1371 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1372 struct btrfs_root *root, struct extent_buffer *buf,
1373 struct extent_buffer *parent, int parent_slot,
1374 struct extent_buffer **cow_ret)
1379 if (trans->transaction != root->fs_info->running_transaction)
1380 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1381 (unsigned long long)trans->transid,
1382 (unsigned long long)
1383 root->fs_info->running_transaction->transid);
1385 if (trans->transid != root->fs_info->generation)
1386 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1387 (unsigned long long)trans->transid,
1388 (unsigned long long)root->fs_info->generation);
1390 if (!should_cow_block(trans, root, buf)) {
1395 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1398 btrfs_set_lock_blocking(parent);
1399 btrfs_set_lock_blocking(buf);
1401 ret = __btrfs_cow_block(trans, root, buf, parent,
1402 parent_slot, cow_ret, search_start, 0);
1404 trace_btrfs_cow_block(root, buf, *cow_ret);
1410 * helper function for defrag to decide if two blocks pointed to by a
1411 * node are actually close by
1413 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1415 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1417 if (blocknr > other && blocknr - (other + blocksize) < 32768)
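/*
 * Editor's worked example (hypothetical numbers): with a 4K blocksize,
 * blocks at byte offsets 100*4096 and 106*4096 have a gap of
 * 106*4096 - (100*4096 + 4096) = 20480 bytes, which is under 32768, so
 * close_blocks() reports them as close and defrag leaves them alone.
 */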
1423 * compare two keys in a memcmp fashion
1425 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1427 struct btrfs_key k1;
1429 btrfs_disk_key_to_cpu(&k1, disk);
1431 return btrfs_comp_cpu_keys(&k1, k2);
1435 * same as comp_keys only with two btrfs_key's
1437 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1439 if (k1->objectid > k2->objectid)
1441 if (k1->objectid < k2->objectid)
1443 if (k1->type > k2->type)
1445 if (k1->type < k2->type)
1447 if (k1->offset > k2->offset)
1449 if (k1->offset < k2->offset)
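/*
 * Editor's worked example (hypothetical keys): comparing
 * k1 = {objectid 256, type 84, offset 0} with
 * k2 = {objectid 256, type 96, offset 0}, the objectids tie, so the type
 * field decides and btrfs_comp_cpu_keys() returns -1, i.e. k1 sorts
 * before k2. offset is only consulted when both earlier fields are
 * equal.
 */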
1455 * this is used by the defrag code to go through all the
1456 * leaves pointed to by a node and reallocate them so that
1457 * disk order is close to key order
1459 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1460 struct btrfs_root *root, struct extent_buffer *parent,
1461 int start_slot, u64 *last_ret,
1462 struct btrfs_key *progress)
1464 struct extent_buffer *cur;
1467 u64 search_start = *last_ret;
1477 int progress_passed = 0;
1478 struct btrfs_disk_key disk_key;
1480 parent_level = btrfs_header_level(parent);
1482 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1483 WARN_ON(trans->transid != root->fs_info->generation);
1485 parent_nritems = btrfs_header_nritems(parent);
1486 blocksize = btrfs_level_size(root, parent_level - 1);
1487 end_slot = parent_nritems;
1489 if (parent_nritems == 1)
1492 btrfs_set_lock_blocking(parent);
1494 for (i = start_slot; i < end_slot; i++) {
1497 btrfs_node_key(parent, &disk_key, i);
1498 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1501 progress_passed = 1;
1502 blocknr = btrfs_node_blockptr(parent, i);
1503 gen = btrfs_node_ptr_generation(parent, i);
1504 if (last_block == 0)
1505 last_block = blocknr;
1508 other = btrfs_node_blockptr(parent, i - 1);
1509 close = close_blocks(blocknr, other, blocksize);
1511 if (!close && i < end_slot - 2) {
1512 other = btrfs_node_blockptr(parent, i + 1);
1513 close = close_blocks(blocknr, other, blocksize);
1516 last_block = blocknr;
1520 cur = btrfs_find_tree_block(root, blocknr, blocksize);
1522 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1525 if (!cur || !uptodate) {
1527 cur = read_tree_block(root, blocknr,
1531 } else if (!uptodate) {
1532 err = btrfs_read_buffer(cur, gen);
1534 free_extent_buffer(cur);
1539 if (search_start == 0)
1540 search_start = last_block;
1542 btrfs_tree_lock(cur);
1543 btrfs_set_lock_blocking(cur);
1544 err = __btrfs_cow_block(trans, root, cur, parent, i,
1547 (end_slot - i) * blocksize));
1549 btrfs_tree_unlock(cur);
1550 free_extent_buffer(cur);
1553 search_start = cur->start;
1554 last_block = cur->start;
1555 *last_ret = search_start;
1556 btrfs_tree_unlock(cur);
1557 free_extent_buffer(cur);
1563 * The leaf data grows from end-to-front in the node.
1564 * this returns the address of the start of the last item,
1565 * which is the stop of the leaf data stack
1567 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1568 struct extent_buffer *leaf)
1570 u32 nr = btrfs_header_nritems(leaf);
1572 return BTRFS_LEAF_DATA_SIZE(root);
1573 return btrfs_item_offset_nr(leaf, nr - 1);
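/*
 * Editor's worked example (hypothetical sizes): in a leaf with a
 * BTRFS_LEAF_DATA_SIZE of 3995 bytes holding two items whose data is
 * 100 and 50 bytes, the data is packed at the end, so item 0's data sits
 * at offset 3895 and item 1's (the last item's) at 3845; leaf_data_end()
 * returns 3845. An empty leaf returns the full data size, 3995.
 */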
1578 * search for key in the extent_buffer. The items start at offset p,
1579 * and they are item_size apart. There are 'max' items in p.
1581 * the slot in the array is returned via slot, and it points to
1582 * the place where you would insert key if it is not found in the array.
1585 * slot may point to max if the key is bigger than all of the keys
1587 static noinline int generic_bin_search(struct extent_buffer *eb,
1589 int item_size, struct btrfs_key *key,
1596 struct btrfs_disk_key *tmp = NULL;
1597 struct btrfs_disk_key unaligned;
1598 unsigned long offset;
1600 unsigned long map_start = 0;
1601 unsigned long map_len = 0;
1604 while (low < high) {
1605 mid = (low + high) / 2;
1606 offset = p + mid * item_size;
1608 if (!kaddr || offset < map_start ||
1609 (offset + sizeof(struct btrfs_disk_key)) >
1610 map_start + map_len) {
1612 err = map_private_extent_buffer(eb, offset,
1613 sizeof(struct btrfs_disk_key),
1614 &kaddr, &map_start, &map_len);
1617 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1620 read_extent_buffer(eb, &unaligned,
1621 offset, sizeof(unaligned));
1626 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1629 ret = comp_keys(tmp, key);
1645 * simple bin_search frontend that does the right thing for leaves vs nodes.
1648 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1649 int level, int *slot)
1652 return generic_bin_search(eb,
1653 offsetof(struct btrfs_leaf, items),
1654 sizeof(struct btrfs_item),
1655 key, btrfs_header_nritems(eb),
1658 return generic_bin_search(eb,
1659 offsetof(struct btrfs_node, ptrs),
1660 sizeof(struct btrfs_key_ptr),
1661 key, btrfs_header_nritems(eb),
1665 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1666 int level, int *slot)
1668 return bin_search(eb, key, level, slot);
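/*
 * Editor's illustrative sketch (hypothetical helper, plain array instead
 * of an extent_buffer): the contract generic_bin_search implements.
 * Returns 0 with *slot set on an exact match, or 1 with *slot at the
 * insertion point; *slot may equal max when the key is bigger than all
 * existing keys.
 */
static int bin_search_sketch(struct btrfs_key *keys, int max,
			     struct btrfs_key *key, int *slot)
{
	int low = 0, high = max;

	while (low < high) {
		int mid = (low + high) / 2;
		int ret = btrfs_comp_cpu_keys(&keys[mid], key);

		if (ret < 0)
			low = mid + 1;		/* key is to the right */
		else if (ret > 0)
			high = mid;		/* key is to the left */
		else {
			*slot = mid;		/* exact match */
			return 0;
		}
	}
	*slot = low;				/* insertion point */
	return 1;
}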
1671 static void root_add_used(struct btrfs_root *root, u32 size)
1673 spin_lock(&root->accounting_lock);
1674 btrfs_set_root_used(&root->root_item,
1675 btrfs_root_used(&root->root_item) + size);
1676 spin_unlock(&root->accounting_lock);
1679 static void root_sub_used(struct btrfs_root *root, u32 size)
1681 spin_lock(&root->accounting_lock);
1682 btrfs_set_root_used(&root->root_item,
1683 btrfs_root_used(&root->root_item) - size);
1684 spin_unlock(&root->accounting_lock);
1687 /* given a node and slot number, this reads the block it points to. The
1688 * extent buffer is returned with a reference taken (but unlocked).
1689 * NULL is returned on error.
1691 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1692 struct extent_buffer *parent, int slot)
1694 int level = btrfs_header_level(parent);
1697 if (slot >= btrfs_header_nritems(parent))
1702 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
1703 btrfs_level_size(root, level - 1),
1704 btrfs_node_ptr_generation(parent, slot));
1708 * node level balancing, used to make sure nodes are in proper order for
1709 * item deletion. We balance from the top down, so we have to make sure
1710 * that a deletion won't leave a node completely empty later on.
1712 static noinline int balance_level(struct btrfs_trans_handle *trans,
1713 struct btrfs_root *root,
1714 struct btrfs_path *path, int level)
1716 struct extent_buffer *right = NULL;
1717 struct extent_buffer *mid;
1718 struct extent_buffer *left = NULL;
1719 struct extent_buffer *parent = NULL;
1723 int orig_slot = path->slots[level];
1729 mid = path->nodes[level];
1731 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1732 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1733 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1735 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1737 if (level < BTRFS_MAX_LEVEL - 1) {
1738 parent = path->nodes[level + 1];
1739 pslot = path->slots[level + 1];
1743 * deal with the case where there is only one pointer in the root
1744 * by promoting the node below to a root
1747 struct extent_buffer *child;
1749 if (btrfs_header_nritems(mid) != 1)
1752 /* promote the child to a root */
1753 child = read_node_slot(root, mid, 0);
1756 btrfs_std_error(root->fs_info, ret);
1760 btrfs_tree_lock(child);
1761 btrfs_set_lock_blocking(child);
1762 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1764 btrfs_tree_unlock(child);
1765 free_extent_buffer(child);
1769 tree_mod_log_set_root_pointer(root, child, 1);
1770 rcu_assign_pointer(root->node, child);
1772 add_root_to_dirty_list(root);
1773 btrfs_tree_unlock(child);
1775 path->locks[level] = 0;
1776 path->nodes[level] = NULL;
1777 clean_tree_block(trans, root, mid);
1778 btrfs_tree_unlock(mid);
1779 /* once for the path */
1780 free_extent_buffer(mid);
1782 root_sub_used(root, mid->len);
1783 btrfs_free_tree_block(trans, root, mid, 0, 1);
1784 /* once for the root ptr */
1785 free_extent_buffer_stale(mid);
1788 if (btrfs_header_nritems(mid) >
1789 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1792 left = read_node_slot(root, parent, pslot - 1);
1794 btrfs_tree_lock(left);
1795 btrfs_set_lock_blocking(left);
1796 wret = btrfs_cow_block(trans, root, left,
1797 parent, pslot - 1, &left);
1803 right = read_node_slot(root, parent, pslot + 1);
1805 btrfs_tree_lock(right);
1806 btrfs_set_lock_blocking(right);
1807 wret = btrfs_cow_block(trans, root, right,
1808 parent, pslot + 1, &right);
1815 /* first, try to make some room in the middle buffer */
1817 orig_slot += btrfs_header_nritems(left);
1818 wret = push_node_left(trans, root, left, mid, 1);
1824 * then try to empty the right most buffer into the middle
1827 wret = push_node_left(trans, root, mid, right, 1);
1828 if (wret < 0 && wret != -ENOSPC)
1830 if (btrfs_header_nritems(right) == 0) {
1831 clean_tree_block(trans, root, right);
1832 btrfs_tree_unlock(right);
1833 del_ptr(trans, root, path, level + 1, pslot + 1);
1834 root_sub_used(root, right->len);
1835 btrfs_free_tree_block(trans, root, right, 0, 1);
1836 free_extent_buffer_stale(right);
1839 struct btrfs_disk_key right_key;
1840 btrfs_node_key(right, &right_key, 0);
1841 tree_mod_log_set_node_key(root->fs_info, parent,
1843 btrfs_set_node_key(parent, &right_key, pslot + 1);
1844 btrfs_mark_buffer_dirty(parent);
1847 if (btrfs_header_nritems(mid) == 1) {
1849 * we're not allowed to leave a node with one item in the
1850 * tree during a delete. A deletion from lower in the tree
1851 * could try to delete the only pointer in this node.
1852 * So, pull some keys from the left.
1853 * There has to be a left pointer at this point because
1854 * otherwise we would have pulled some pointers from the right.
1859 btrfs_std_error(root->fs_info, ret);
1862 wret = balance_node_right(trans, root, mid, left);
1868 wret = push_node_left(trans, root, left, mid, 1);
1874 if (btrfs_header_nritems(mid) == 0) {
1875 clean_tree_block(trans, root, mid);
1876 btrfs_tree_unlock(mid);
1877 del_ptr(trans, root, path, level + 1, pslot);
1878 root_sub_used(root, mid->len);
1879 btrfs_free_tree_block(trans, root, mid, 0, 1);
1880 free_extent_buffer_stale(mid);
1883 /* update the parent key to reflect our changes */
1884 struct btrfs_disk_key mid_key;
1885 btrfs_node_key(mid, &mid_key, 0);
1886 tree_mod_log_set_node_key(root->fs_info, parent,
1888 btrfs_set_node_key(parent, &mid_key, pslot);
1889 btrfs_mark_buffer_dirty(parent);
1892 /* update the path */
1894 if (btrfs_header_nritems(left) > orig_slot) {
1895 extent_buffer_get(left);
1896 /* left was locked after cow */
1897 path->nodes[level] = left;
1898 path->slots[level + 1] -= 1;
1899 path->slots[level] = orig_slot;
1901 btrfs_tree_unlock(mid);
1902 free_extent_buffer(mid);
1905 orig_slot -= btrfs_header_nritems(left);
1906 path->slots[level] = orig_slot;
1909 /* double check we haven't messed things up */
1911 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1915 btrfs_tree_unlock(right);
1916 free_extent_buffer(right);
1919 if (path->nodes[level] != left)
1920 btrfs_tree_unlock(left);
1921 free_extent_buffer(left);
1926 /* Node balancing for insertion. Here we only split or push nodes around
1927 * when they are completely full. This is also done top down, so we
1928 * have to be pessimistic.
1930 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1931 struct btrfs_root *root,
1932 struct btrfs_path *path, int level)
1934 struct extent_buffer *right = NULL;
1935 struct extent_buffer *mid;
1936 struct extent_buffer *left = NULL;
1937 struct extent_buffer *parent = NULL;
1941 int orig_slot = path->slots[level];
1946 mid = path->nodes[level];
1947 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1949 if (level < BTRFS_MAX_LEVEL - 1) {
1950 parent = path->nodes[level + 1];
1951 pslot = path->slots[level + 1];
1957 left = read_node_slot(root, parent, pslot - 1);
1959 /* first, try to make some room in the middle buffer */
1963 btrfs_tree_lock(left);
1964 btrfs_set_lock_blocking(left);
1966 left_nr = btrfs_header_nritems(left);
1967 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1970 ret = btrfs_cow_block(trans, root, left, parent,
1975 wret = push_node_left(trans, root,
1982 struct btrfs_disk_key disk_key;
1983 orig_slot += left_nr;
1984 btrfs_node_key(mid, &disk_key, 0);
1985 tree_mod_log_set_node_key(root->fs_info, parent,
1987 btrfs_set_node_key(parent, &disk_key, pslot);
1988 btrfs_mark_buffer_dirty(parent);
1989 if (btrfs_header_nritems(left) > orig_slot) {
1990 path->nodes[level] = left;
1991 path->slots[level + 1] -= 1;
1992 path->slots[level] = orig_slot;
1993 btrfs_tree_unlock(mid);
1994 free_extent_buffer(mid);
1997 btrfs_header_nritems(left);
1998 path->slots[level] = orig_slot;
1999 btrfs_tree_unlock(left);
2000 free_extent_buffer(left);
2004 btrfs_tree_unlock(left);
2005 free_extent_buffer(left);
2007 right = read_node_slot(root, parent, pslot + 1);
2010 * then try to empty the right most buffer into the middle
2015 btrfs_tree_lock(right);
2016 btrfs_set_lock_blocking(right);
2018 right_nr = btrfs_header_nritems(right);
2019 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2022 ret = btrfs_cow_block(trans, root, right,
2028 wret = balance_node_right(trans, root,
2035 struct btrfs_disk_key disk_key;
2037 btrfs_node_key(right, &disk_key, 0);
2038 tree_mod_log_set_node_key(root->fs_info, parent,
2040 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2041 btrfs_mark_buffer_dirty(parent);
2043 if (btrfs_header_nritems(mid) <= orig_slot) {
2044 path->nodes[level] = right;
2045 path->slots[level + 1] += 1;
2046 path->slots[level] = orig_slot -
2047 btrfs_header_nritems(mid);
2048 btrfs_tree_unlock(mid);
2049 free_extent_buffer(mid);
2051 btrfs_tree_unlock(right);
2052 free_extent_buffer(right);
2056 btrfs_tree_unlock(right);
2057 free_extent_buffer(right);
2063 * readahead one full node of leaves, finding things that are close
2064 * to the block in 'slot', and triggering ra on them.
2066 static void reada_for_search(struct btrfs_root *root,
2067 struct btrfs_path *path,
2068 int level, int slot, u64 objectid)
2070 struct extent_buffer *node;
2071 struct btrfs_disk_key disk_key;
2077 int direction = path->reada;
2078 struct extent_buffer *eb;
2086 if (!path->nodes[level])
2089 node = path->nodes[level];
2091 search = btrfs_node_blockptr(node, slot);
2092 blocksize = btrfs_level_size(root, level - 1);
2093 eb = btrfs_find_tree_block(root, search, blocksize);
2095 free_extent_buffer(eb);
2101 nritems = btrfs_header_nritems(node);
2105 if (direction < 0) {
2109 } else if (direction > 0) {
2114 if (path->reada < 0 && objectid) {
2115 btrfs_node_key(node, &disk_key, nr);
2116 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2119 search = btrfs_node_blockptr(node, nr);
2120 if ((search <= target && target - search <= 65536) ||
2121 (search > target && search - target <= 65536)) {
2122 gen = btrfs_node_ptr_generation(node, nr);
2123 readahead_tree_block(root, search, blocksize, gen);
2127 if ((nread > 65536 || nscan > 32))
2133 * returns -EAGAIN if it had to drop the path, or zero if everything was in cache.
2136 static noinline int reada_for_balance(struct btrfs_root *root,
2137 struct btrfs_path *path, int level)
2141 struct extent_buffer *parent;
2142 struct extent_buffer *eb;
2149 parent = path->nodes[level + 1];
2153 nritems = btrfs_header_nritems(parent);
2154 slot = path->slots[level + 1];
2155 blocksize = btrfs_level_size(root, level);
2158 block1 = btrfs_node_blockptr(parent, slot - 1);
2159 gen = btrfs_node_ptr_generation(parent, slot - 1);
2160 eb = btrfs_find_tree_block(root, block1, blocksize);
2162 * if we get -eagain from btrfs_buffer_uptodate, we
2163 * don't want to return eagain here. That will loop forever.
2166 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2168 free_extent_buffer(eb);
2170 if (slot + 1 < nritems) {
2171 block2 = btrfs_node_blockptr(parent, slot + 1);
2172 gen = btrfs_node_ptr_generation(parent, slot + 1);
2173 eb = btrfs_find_tree_block(root, block2, blocksize);
2174 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2176 free_extent_buffer(eb);
2178 if (block1 || block2) {
2181 /* release the whole path */
2182 btrfs_release_path(path);
2184 /* read the blocks */
2186 readahead_tree_block(root, block1, blocksize, 0);
2188 readahead_tree_block(root, block2, blocksize, 0);
2191 eb = read_tree_block(root, block1, blocksize, 0);
2192 free_extent_buffer(eb);
2195 eb = read_tree_block(root, block2, blocksize, 0);
2196 free_extent_buffer(eb);
2204 * when we walk down the tree, it is usually safe to unlock the higher layers
2205 * in the tree. The exceptions are when our path goes through slot 0, because
2206 * operations on the tree might require changing key pointers higher up in the tree.
2209 * callers might also have set path->keep_locks, which tells this code to keep
2210 * the lock if the path points to the last slot in the block. This is part of
2211 * walking through the tree, and selecting the next slot in the higher block.
2213 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2214 * if lowest_unlock is 1, level 0 won't be unlocked
2216 static noinline void unlock_up(struct btrfs_path *path, int level,
2217 int lowest_unlock, int min_write_lock_level,
2218 int *write_lock_level)
2221 int skip_level = level;
2223 struct extent_buffer *t;
2225 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2226 if (!path->nodes[i])
2228 if (!path->locks[i])
2230 if (!no_skips && path->slots[i] == 0) {
2234 if (!no_skips && path->keep_locks) {
2237 nritems = btrfs_header_nritems(t);
2238 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2243 if (skip_level < i && i >= lowest_unlock)
2247 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2248 btrfs_tree_unlock_rw(t, path->locks[i]);
2250 if (write_lock_level &&
2251 i > min_write_lock_level &&
2252 i <= *write_lock_level) {
2253 *write_lock_level = i - 1;
2260 * This releases any locks held in the path starting at level and
2261 * going all the way up to the root.
2263 * btrfs_search_slot will keep the lock held on higher nodes in a few
2264 * corner cases, such as COW of the block at slot zero in the node. This
2265 * ignores those rules, and it should only be called when there are no
2266 * more updates to be done higher up in the tree.
2268 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2272 if (path->keep_locks)
2275 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2276 if (!path->nodes[i])
2278 if (!path->locks[i])
2280 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2286 * helper function for btrfs_search_slot. The goal is to find a block
2287 * in cache without setting the path to blocking. If we find the block
2288 * we return zero and the path is unchanged.
2290 * If we can't find the block, we set the path blocking and do some
2291 * reada. -EAGAIN is returned and the search must be repeated.
2294 read_block_for_search(struct btrfs_trans_handle *trans,
2295 struct btrfs_root *root, struct btrfs_path *p,
2296 struct extent_buffer **eb_ret, int level, int slot,
2297 struct btrfs_key *key, u64 time_seq)
2302 struct extent_buffer *b = *eb_ret;
2303 struct extent_buffer *tmp;
2306 blocknr = btrfs_node_blockptr(b, slot);
2307 gen = btrfs_node_ptr_generation(b, slot);
2308 blocksize = btrfs_level_size(root, level - 1);
2310 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2312 /* first we do an atomic uptodate check */
2313 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2314 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2316 * we found an up to date block without sleeping, return right away.
2323 /* the pages were up to date, but we failed
2324 * the generation number check. Do a full
2325 * read for the generation number that is correct.
2326 * We must do this without dropping locks so
2327 * we can trust our generation number
2329 free_extent_buffer(tmp);
2330 btrfs_set_path_blocking(p);
2332 /* now we're allowed to do a blocking uptodate check */
2333 tmp = read_tree_block(root, blocknr, blocksize, gen);
2334 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
2338 free_extent_buffer(tmp);
2339 btrfs_release_path(p);
2345 * reduce lock contention at high levels
2346 * of the btree by dropping locks before
2347 * we read. Don't release the lock on the current
2348 * level because we need to walk this node to figure
2349 * out which blocks to read.
2351 btrfs_unlock_up_safe(p, level + 1);
2352 btrfs_set_path_blocking(p);
2354 free_extent_buffer(tmp);
2356 reada_for_search(root, p, level, slot, key->objectid);
2358 btrfs_release_path(p);
2361 tmp = read_tree_block(root, blocknr, blocksize, 0);
2364 * If the read above didn't mark this buffer up to date,
2365 * it will never end up being up to date. Set ret to EIO now
2366 * and give up so that our caller doesn't loop forever on our EAGAINs.
2369 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2371 free_extent_buffer(tmp);
2377 * helper function for btrfs_search_slot. This does all of the checks
2378 * for node-level blocks and does any balancing required based on the ins_len.
2381 * If no extra work was required, zero is returned. If we had to
2382 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2386 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2387 struct btrfs_root *root, struct btrfs_path *p,
2388 struct extent_buffer *b, int level, int ins_len,
2389 int *write_lock_level)
2392 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2393 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2396 if (*write_lock_level < level + 1) {
2397 *write_lock_level = level + 1;
2398 btrfs_release_path(p);
2402 sret = reada_for_balance(root, p, level);
2406 btrfs_set_path_blocking(p);
2407 sret = split_node(trans, root, p, level);
2408 btrfs_clear_path_blocking(p, NULL, 0);
2415 b = p->nodes[level];
2416 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2417 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2420 if (*write_lock_level < level + 1) {
2421 *write_lock_level = level + 1;
2422 btrfs_release_path(p);
2426 sret = reada_for_balance(root, p, level);
2430 btrfs_set_path_blocking(p);
2431 sret = balance_level(trans, root, p, level);
2432 btrfs_clear_path_blocking(p, NULL, 0);
2438 b = p->nodes[level];
2440 btrfs_release_path(p);
2443 BUG_ON(btrfs_header_nritems(b) == 1);
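/*
 * The policy above in short (a sketch): with N = BTRFS_NODEPTRS_PER_BLOCK,
 * inserts split a node once it holds N - 3 or more pointers, deletes
 * rebalance once it drops below N / 2, and either action can hand back
 * -EAGAIN after releasing the path.
 */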
2454 * look for key in the tree. path is filled in with nodes along the way.
2455 * if key is found, we return zero and you can find the item in the leaf
2456 * level of the path (level 0)
2458 * If the key isn't found, the path points to the slot where it should
2459 * be inserted, and 1 is returned. If there are other errors during the
2460 * search a negative error number is returned.
2462 * if ins_len > 0, nodes and leaves will be split as we walk down the
2463 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible).
2466 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2467 *root, struct btrfs_key *key, struct btrfs_path *p, int
2470 struct extent_buffer *b;
2475 int lowest_unlock = 1;
2477 /* everything at write_lock_level or lower must be write locked */
2478 int write_lock_level = 0;
2479 u8 lowest_level = 0;
2480 int min_write_lock_level;
2482 lowest_level = p->lowest_level;
2483 WARN_ON(lowest_level && ins_len > 0);
2484 WARN_ON(p->nodes[0] != NULL);
2489 /* when we are removing items, we might have to go up to level
2490 * two as we update tree pointers. Make sure we keep write
2491 * locks on those levels as well
2493 write_lock_level = 2;
2494 } else if (ins_len > 0) {
2496 * for inserting items, make sure we have a write lock on
2497 * level 1 so we can update keys
2499 write_lock_level = 1;
2503 write_lock_level = -1;
2505 if (cow && (p->keep_locks || p->lowest_level))
2506 write_lock_level = BTRFS_MAX_LEVEL;
2508 min_write_lock_level = write_lock_level;
2512 * we try very hard to do read locks on the root
2514 root_lock = BTRFS_READ_LOCK;
2516 if (p->search_commit_root) {
2518 * the commit roots are read only
2519 * so we always do read locks
2521 b = root->commit_root;
2522 extent_buffer_get(b);
2523 level = btrfs_header_level(b);
2524 if (!p->skip_locking)
2525 btrfs_tree_read_lock(b);
2527 if (p->skip_locking) {
2528 b = btrfs_root_node(root);
2529 level = btrfs_header_level(b);
2531 /* we don't know the level of the root node
2532 * until we actually have it read locked
2534 b = btrfs_read_lock_root_node(root);
2535 level = btrfs_header_level(b);
2536 if (level <= write_lock_level) {
2537 /* whoops, must trade for write lock */
2538 btrfs_tree_read_unlock(b);
2539 free_extent_buffer(b);
2540 b = btrfs_lock_root_node(root);
2541 root_lock = BTRFS_WRITE_LOCK;
2543 /* the level might have changed, check again */
2544 level = btrfs_header_level(b);
2548 p->nodes[level] = b;
2549 if (!p->skip_locking)
2550 p->locks[level] = root_lock;
2553 level = btrfs_header_level(b);
2556 * setup the path here so we can release it under lock
2557 * contention with the cow code
2561 * if we don't really need to cow this block
2562 * then we don't want to set the path blocking,
2563 * so we test it here
2565 if (!should_cow_block(trans, root, b))
2568 btrfs_set_path_blocking(p);
2571 * must have write locks on this node and the parent.
2574 if (level > write_lock_level ||
2575 (level + 1 > write_lock_level &&
2576 level + 1 < BTRFS_MAX_LEVEL &&
2577 p->nodes[level + 1])) {
2578 write_lock_level = level + 1;
2579 btrfs_release_path(p);
2583 err = btrfs_cow_block(trans, root, b,
2584 p->nodes[level + 1],
2585 p->slots[level + 1], &b);
2592 BUG_ON(!cow && ins_len);
2594 p->nodes[level] = b;
2595 btrfs_clear_path_blocking(p, NULL, 0);
2598 * we have a lock on b and as long as we aren't changing
2599 * the tree, there is no way for the items in b to change.
2600 * It is safe to drop the lock on our parent before we
2601 * go through the expensive btree search on b.
2603 * If cow is true, then we might be changing slot zero,
2604 * which may require changing the parent. So, we can't
2605 * drop the lock until after we know which slot we're operating on.
2609 btrfs_unlock_up_safe(p, level + 1);
2611 ret = bin_search(b, key, level, &slot);
2615 if (ret && slot > 0) {
2619 p->slots[level] = slot;
2620 err = setup_nodes_for_search(trans, root, p, b, level,
2621 ins_len, &write_lock_level);
2628 b = p->nodes[level];
2629 slot = p->slots[level];
2632 * slot 0 is special, if we change the key
2633 * we have to update the parent pointer
2634 * which means we must have a write lock on the parent.
2637 if (slot == 0 && cow &&
2638 write_lock_level < level + 1) {
2639 write_lock_level = level + 1;
2640 btrfs_release_path(p);
2644 unlock_up(p, level, lowest_unlock,
2645 min_write_lock_level, &write_lock_level);
2647 if (level == lowest_level) {
2653 err = read_block_for_search(trans, root, p,
2654 &b, level, slot, key, 0);
2662 if (!p->skip_locking) {
2663 level = btrfs_header_level(b);
2664 if (level <= write_lock_level) {
2665 err = btrfs_try_tree_write_lock(b);
2667 btrfs_set_path_blocking(p);
2669 btrfs_clear_path_blocking(p, b,
2672 p->locks[level] = BTRFS_WRITE_LOCK;
2674 err = btrfs_try_tree_read_lock(b);
2676 btrfs_set_path_blocking(p);
2677 btrfs_tree_read_lock(b);
2678 btrfs_clear_path_blocking(p, b,
2681 p->locks[level] = BTRFS_READ_LOCK;
2683 p->nodes[level] = b;
2686 p->slots[level] = slot;
2688 btrfs_leaf_free_space(root, b) < ins_len) {
2689 if (write_lock_level < 1) {
2690 write_lock_level = 1;
2691 btrfs_release_path(p);
2695 btrfs_set_path_blocking(p);
2696 err = split_leaf(trans, root, key,
2697 p, ins_len, ret == 0);
2698 btrfs_clear_path_blocking(p, NULL, 0);
2706 if (!p->search_for_split)
2707 unlock_up(p, level, lowest_unlock,
2708 min_write_lock_level, &write_lock_level);
2715 * we don't really know what they plan on doing with the path
2716 * from here on, so for now just mark it as blocking
2718 if (!p->leave_spinning)
2719 btrfs_set_path_blocking(p);
2721 btrfs_release_path(p);
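/*
 * Typical caller pattern (a minimal sketch; the key fields and the
 * 'out' label are placeholders, and error handling is trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	leaf = path->nodes[0];
 *	slot = path->slots[0];
 *	...
 *	btrfs_free_path(path);
 *
 * ret == 0 means the key was found at (leaf, slot); ret == 1 means it
 * was not, and the path points at the slot where it would be inserted.
 */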
2726 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2727 * current state of the tree together with the operations recorded in the tree
2728 * modification log to search for the key in a previous version of this tree, as
2729 * denoted by the time_seq parameter.
2731 * Naturally, there is no support for insert, delete or cow operations.
2733 * The resulting path and return value will be set up as if we called
2734 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2736 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2737 struct btrfs_path *p, u64 time_seq)
2739 struct extent_buffer *b;
2744 int lowest_unlock = 1;
2745 u8 lowest_level = 0;
2747 lowest_level = p->lowest_level;
2748 WARN_ON(p->nodes[0] != NULL);
2750 if (p->search_commit_root) {
2752 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2756 b = get_old_root(root, time_seq);
2757 level = btrfs_header_level(b);
2758 p->locks[level] = BTRFS_READ_LOCK;
2761 level = btrfs_header_level(b);
2762 p->nodes[level] = b;
2763 btrfs_clear_path_blocking(p, NULL, 0);
2766 * we have a lock on b and as long as we aren't changing
2767 * the tree, there is no way for the items in b to change.
2768 * It is safe to drop the lock on our parent before we
2769 * go through the expensive btree search on b.
2771 btrfs_unlock_up_safe(p, level + 1);
2773 ret = bin_search(b, key, level, &slot);
2777 if (ret && slot > 0) {
2781 p->slots[level] = slot;
2782 unlock_up(p, level, lowest_unlock, 0, NULL);
2784 if (level == lowest_level) {
2790 err = read_block_for_search(NULL, root, p, &b, level,
2791 slot, key, time_seq);
2799 level = btrfs_header_level(b);
2800 err = btrfs_try_tree_read_lock(b);
2802 btrfs_set_path_blocking(p);
2803 btrfs_tree_read_lock(b);
2804 btrfs_clear_path_blocking(p, b,
2807 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2808 p->locks[level] = BTRFS_READ_LOCK;
2809 p->nodes[level] = b;
2811 p->slots[level] = slot;
2812 unlock_up(p, level, lowest_unlock, 0, NULL);
2818 if (!p->leave_spinning)
2819 btrfs_set_path_blocking(p);
2821 btrfs_release_path(p);
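/*
 * Usage sketch: a read-only lookup against an older version of the
 * tree, with 'time_seq' previously obtained from the tree mod log:
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *
 * The return values match btrfs_search_slot with ins_len and cow
 * both 0, as described above.
 */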
2827 * helper to use instead of search slot if no exact match is needed but
2828 * instead the next or previous item should be returned.
2829 * When find_higher is true, the next higher item is returned, the next lower otherwise.
2831 * When return_any and find_higher are both true, and no higher item is found,
2832 * return the next lower instead.
2833 * When return_any is true and find_higher is false, and no lower item is found,
2834 * return the next higher instead.
2835 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 on error.
2838 int btrfs_search_slot_for_read(struct btrfs_root *root,
2839 struct btrfs_key *key, struct btrfs_path *p,
2840 int find_higher, int return_any)
2843 struct extent_buffer *leaf;
2846 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2850 * a return value of 1 means the path is at the position where the
2851 * item should be inserted. Normally this is the next bigger item,
2852 * but in case the previous item is the last in a leaf, path points
2853 * to the first free slot in the previous leaf, i.e. at an invalid slot.
2859 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2860 ret = btrfs_next_leaf(root, p);
2866 * no higher item found, return the next lower instead.
2871 btrfs_release_path(p);
2875 if (p->slots[0] == 0) {
2876 ret = btrfs_prev_leaf(root, p);
2880 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2886 * no lower item found, return the next higher instead.
2891 btrfs_release_path(p);
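/*
 * Usage sketch: find the item at 'key' or, failing that, the next
 * higher one, falling back to the next lower if the key is past the
 * end of the tree:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 */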
2901 * adjust the pointers going up the tree, starting at level
2902 * making sure the right key of each node points to 'key'.
2903 * This is used after shifting pointers to the left, so it stops
2904 * fixing up pointers when a given leaf/node is not in slot 0 of the
2908 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2909 struct btrfs_root *root, struct btrfs_path *path,
2910 struct btrfs_disk_key *key, int level)
2913 struct extent_buffer *t;
2915 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2916 int tslot = path->slots[i];
2917 if (!path->nodes[i])
2920 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2921 btrfs_set_node_key(t, key, tslot);
2922 btrfs_mark_buffer_dirty(path->nodes[i]);
2931 * This function isn't completely safe. It's the caller's responsibility
2932 * that the new key won't break the order
2934 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2935 struct btrfs_root *root, struct btrfs_path *path,
2936 struct btrfs_key *new_key)
2938 struct btrfs_disk_key disk_key;
2939 struct extent_buffer *eb;
2942 eb = path->nodes[0];
2943 slot = path->slots[0];
2945 btrfs_item_key(eb, &disk_key, slot - 1);
2946 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2948 if (slot < btrfs_header_nritems(eb) - 1) {
2949 btrfs_item_key(eb, &disk_key, slot + 1);
2950 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2953 btrfs_cpu_key_to_disk(&disk_key, new_key);
2954 btrfs_set_item_key(eb, &disk_key, slot);
2955 btrfs_mark_buffer_dirty(eb);
2957 fixup_low_keys(trans, root, path, &disk_key, 1);
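/*
 * Usage sketch (field values are placeholders): moving a file extent
 * item's key forward after trimming its front, where the caller has
 * already verified the new key still sorts between its neighbours:
 *
 *	new_key.objectid = ino;
 *	new_key.type = BTRFS_EXTENT_DATA_KEY;
 *	new_key.offset = start + trimmed;
 *	btrfs_set_item_key_safe(trans, root, path, &new_key);
 */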
2961 * try to push data from one node into the next node left in the tree.
2964 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2965 * error, and > 0 if there was no room in the left hand block.
2967 static int push_node_left(struct btrfs_trans_handle *trans,
2968 struct btrfs_root *root, struct extent_buffer *dst,
2969 struct extent_buffer *src, int empty)
2976 src_nritems = btrfs_header_nritems(src);
2977 dst_nritems = btrfs_header_nritems(dst);
2978 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2979 WARN_ON(btrfs_header_generation(src) != trans->transid);
2980 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2982 if (!empty && src_nritems <= 8)
2985 if (push_items <= 0)
2989 push_items = min(src_nritems, push_items);
2990 if (push_items < src_nritems) {
2991 /* leave at least 8 pointers in the node if
2992 * we aren't going to empty it
2994 if (src_nritems - push_items < 8) {
2995 if (push_items <= 8)
3001 push_items = min(src_nritems - 8, push_items);
3003 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3005 copy_extent_buffer(dst, src,
3006 btrfs_node_key_ptr_offset(dst_nritems),
3007 btrfs_node_key_ptr_offset(0),
3008 push_items * sizeof(struct btrfs_key_ptr));
3010 if (push_items < src_nritems) {
3012 * don't call tree_mod_log_eb_move here, key removal was already
3013 * fully logged by tree_mod_log_eb_copy above.
3015 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3016 btrfs_node_key_ptr_offset(push_items),
3017 (src_nritems - push_items) *
3018 sizeof(struct btrfs_key_ptr));
3020 btrfs_set_header_nritems(src, src_nritems - push_items);
3021 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3022 btrfs_mark_buffer_dirty(src);
3023 btrfs_mark_buffer_dirty(dst);
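/*
 * e.g. (a sketch of the logic above): with dst holding D of the
 * possible BTRFS_NODEPTRS_PER_BLOCK pointers, at most that difference
 * moves left, and unless 'empty' is set src keeps at least 8 pointers.
 */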
3029 * try to push data from one node into the next node right in the tree.
3032 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3033 * error, and > 0 if there was no room in the right hand block.
3035 * this will only push up to 1/2 the contents of the left node over
3037 static int balance_node_right(struct btrfs_trans_handle *trans,
3038 struct btrfs_root *root,
3039 struct extent_buffer *dst,
3040 struct extent_buffer *src)
3048 WARN_ON(btrfs_header_generation(src) != trans->transid);
3049 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3051 src_nritems = btrfs_header_nritems(src);
3052 dst_nritems = btrfs_header_nritems(dst);
3053 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3054 if (push_items <= 0)
3057 if (src_nritems < 4)
3060 max_push = src_nritems / 2 + 1;
3061 /* don't try to empty the node */
3062 if (max_push >= src_nritems)
3065 if (max_push < push_items)
3066 push_items = max_push;
3068 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3069 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3070 btrfs_node_key_ptr_offset(0),
3072 sizeof(struct btrfs_key_ptr));
3074 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3075 src_nritems - push_items, push_items);
3076 copy_extent_buffer(dst, src,
3077 btrfs_node_key_ptr_offset(0),
3078 btrfs_node_key_ptr_offset(src_nritems - push_items),
3079 push_items * sizeof(struct btrfs_key_ptr));
3081 btrfs_set_header_nritems(src, src_nritems - push_items);
3082 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3084 btrfs_mark_buffer_dirty(src);
3085 btrfs_mark_buffer_dirty(dst);
3091 * helper function to insert a new root level in the tree.
3092 * A new node is allocated, and a single item is inserted to
3093 * point to the existing root
3095 * returns zero on success or < 0 on failure.
3097 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3098 struct btrfs_root *root,
3099 struct btrfs_path *path, int level, int log_removal)
3102 struct extent_buffer *lower;
3103 struct extent_buffer *c;
3104 struct extent_buffer *old;
3105 struct btrfs_disk_key lower_key;
3107 BUG_ON(path->nodes[level]);
3108 BUG_ON(path->nodes[level-1] != root->node);
3110 lower = path->nodes[level-1];
3112 btrfs_item_key(lower, &lower_key, 0);
3114 btrfs_node_key(lower, &lower_key, 0);
3116 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3117 root->root_key.objectid, &lower_key,
3118 level, root->node->start, 0);
3122 root_add_used(root, root->nodesize);
3124 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3125 btrfs_set_header_nritems(c, 1);
3126 btrfs_set_header_level(c, level);
3127 btrfs_set_header_bytenr(c, c->start);
3128 btrfs_set_header_generation(c, trans->transid);
3129 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3130 btrfs_set_header_owner(c, root->root_key.objectid);
3132 write_extent_buffer(c, root->fs_info->fsid,
3133 (unsigned long)btrfs_header_fsid(c),
3136 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3137 (unsigned long)btrfs_header_chunk_tree_uuid(c),
3140 btrfs_set_node_key(c, &lower_key, 0);
3141 btrfs_set_node_blockptr(c, 0, lower->start);
3142 lower_gen = btrfs_header_generation(lower);
3143 WARN_ON(lower_gen != trans->transid);
3145 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3147 btrfs_mark_buffer_dirty(c);
3150 tree_mod_log_set_root_pointer(root, c, log_removal);
3151 rcu_assign_pointer(root->node, c);
3153 /* the super has an extra ref to root->node */
3154 free_extent_buffer(old);
3156 add_root_to_dirty_list(root);
3157 extent_buffer_get(c);
3158 path->nodes[level] = c;
3159 path->locks[level] = BTRFS_WRITE_LOCK;
3160 path->slots[level] = 0;
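/*
 * Roughly (sketch): the tree grows one level, from
 *
 *	[ lower ] ...		to	[ c ]
 *					   |
 *					[ lower ] ...
 *
 * where c holds a single pointer to the old root, so the very next
 * insert into c cannot overflow it.
 */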
3165 * worker function to insert a single pointer in a node.
3166 * the node should have enough room for the pointer already
3168 * slot and level indicate where you want the key to go, and
3169 * bytenr is the block the key points to.
3171 static void insert_ptr(struct btrfs_trans_handle *trans,
3172 struct btrfs_root *root, struct btrfs_path *path,
3173 struct btrfs_disk_key *key, u64 bytenr,
3174 int slot, int level)
3176 struct extent_buffer *lower;
3180 BUG_ON(!path->nodes[level]);
3181 btrfs_assert_tree_locked(path->nodes[level]);
3182 lower = path->nodes[level];
3183 nritems = btrfs_header_nritems(lower);
3184 BUG_ON(slot > nritems);
3185 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3186 if (slot != nritems) {
3188 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3189 slot, nritems - slot);
3190 memmove_extent_buffer(lower,
3191 btrfs_node_key_ptr_offset(slot + 1),
3192 btrfs_node_key_ptr_offset(slot),
3193 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3196 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3200 btrfs_set_node_key(lower, key, slot);
3201 btrfs_set_node_blockptr(lower, slot, bytenr);
3202 WARN_ON(trans->transid == 0);
3203 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3204 btrfs_set_header_nritems(lower, nritems + 1);
3205 btrfs_mark_buffer_dirty(lower);
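/*
 * e.g. (sketch): inserting at slot 2 of a node holding 5 pointers
 * shifts slots 2..4 up by one, then writes the new key/blockptr pair,
 * stamped with the current transid, into slot 2.
 */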
3209 * split the node at the specified level in path in two.
3210 * The path is corrected to point to the appropriate node after the split
3212 * Before splitting this tries to make some room in the node by pushing
3213 * left and right, if either one works, it returns right away.
3215 * returns 0 on success and < 0 on failure
3217 static noinline int split_node(struct btrfs_trans_handle *trans,
3218 struct btrfs_root *root,
3219 struct btrfs_path *path, int level)
3221 struct extent_buffer *c;
3222 struct extent_buffer *split;
3223 struct btrfs_disk_key disk_key;
3228 c = path->nodes[level];
3229 WARN_ON(btrfs_header_generation(c) != trans->transid);
3230 if (c == root->node) {
3232 * trying to split the root, let's make a new one
3234 * tree mod log: We pass 0 as log_removal parameter to
3235 * insert_new_root, because that root buffer will be kept as a
3236 * normal node. We are going to log removal of half of the
3237 * elements below with tree_mod_log_eb_copy. We're holding a
3238 * tree lock on the buffer, which is why we cannot race with
3239 * other tree_mod_log users.
3241 ret = insert_new_root(trans, root, path, level + 1, 0);
3245 ret = push_nodes_for_insert(trans, root, path, level);
3246 c = path->nodes[level];
3247 if (!ret && btrfs_header_nritems(c) <
3248 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3254 c_nritems = btrfs_header_nritems(c);
3255 mid = (c_nritems + 1) / 2;
3256 btrfs_node_key(c, &disk_key, mid);
3258 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3259 root->root_key.objectid,
3260 &disk_key, level, c->start, 0);
3262 return PTR_ERR(split);
3264 root_add_used(root, root->nodesize);
3266 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3267 btrfs_set_header_level(split, btrfs_header_level(c));
3268 btrfs_set_header_bytenr(split, split->start);
3269 btrfs_set_header_generation(split, trans->transid);
3270 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3271 btrfs_set_header_owner(split, root->root_key.objectid);
3272 write_extent_buffer(split, root->fs_info->fsid,
3273 (unsigned long)btrfs_header_fsid(split),
3275 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3276 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3279 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3280 copy_extent_buffer(split, c,
3281 btrfs_node_key_ptr_offset(0),
3282 btrfs_node_key_ptr_offset(mid),
3283 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3284 btrfs_set_header_nritems(split, c_nritems - mid);
3285 btrfs_set_header_nritems(c, mid);
3288 btrfs_mark_buffer_dirty(c);
3289 btrfs_mark_buffer_dirty(split);
3291 insert_ptr(trans, root, path, &disk_key, split->start,
3292 path->slots[level + 1] + 1, level + 1);
3294 if (path->slots[level] >= mid) {
3295 path->slots[level] -= mid;
3296 btrfs_tree_unlock(c);
3297 free_extent_buffer(c);
3298 path->nodes[level] = split;
3299 path->slots[level + 1] += 1;
3301 btrfs_tree_unlock(split);
3302 free_extent_buffer(split);
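/*
 * Sketch of the result above, with mid = (c_nritems + 1) / 2:
 *
 *	c:	[ ptr 0 .. ptr mid-1 ]
 *	split:	[ ptr mid .. ptr c_nritems-1 ]
 *
 * and the parent gains a pointer to 'split' right after its pointer
 * to 'c'.
 */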
3308 * how many bytes are required to store the items in a leaf. start
3309 * and nr indicate which items in the leaf to check. This totals up the
3310 * space used both by the item structs and the item data
3312 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3314 struct btrfs_item *start_item;
3315 struct btrfs_item *end_item;
3316 struct btrfs_map_token token;
3318 int nritems = btrfs_header_nritems(l);
3319 int end = min(nritems, start + nr) - 1;
3323 btrfs_init_map_token(&token);
3324 start_item = btrfs_item_nr(l, start);
3325 end_item = btrfs_item_nr(l, end);
3326 data_len = btrfs_token_item_offset(l, start_item, &token) +
3327 btrfs_token_item_size(l, start_item, &token);
3328 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3329 data_len += sizeof(struct btrfs_item) * nr;
3330 WARN_ON(data_len < 0);
3335 * The space between the end of the leaf items and
3336 * the start of the leaf data. IOW, how much room
3337 * the leaf has left for both items and data
3339 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3340 struct extent_buffer *leaf)
3342 int nritems = btrfs_header_nritems(leaf);
3344 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3346 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3347 "used %d nritems %d\n",
3348 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3349 leaf_space_used(leaf, 0, nritems), nritems);
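/*
 * Typical check (a sketch mirroring btrfs_search_slot): before
 * inserting 'ins_len' bytes (item data plus sizeof(struct btrfs_item)),
 * verify the leaf has room and split it otherwise:
 *
 *	if (btrfs_leaf_free_space(root, leaf) < ins_len)
 *		err = split_leaf(trans, root, ins_key, path, ins_len, 0);
 */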
3355 * min slot controls the lowest index we're willing to push to the
3356 * right. We'll push up to and including min_slot, but no lower
3358 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3359 struct btrfs_root *root,
3360 struct btrfs_path *path,
3361 int data_size, int empty,
3362 struct extent_buffer *right,
3363 int free_space, u32 left_nritems,
3366 struct extent_buffer *left = path->nodes[0];
3367 struct extent_buffer *upper = path->nodes[1];
3368 struct btrfs_map_token token;
3369 struct btrfs_disk_key disk_key;
3374 struct btrfs_item *item;
3380 btrfs_init_map_token(&token);
3385 nr = max_t(u32, 1, min_slot);
3387 if (path->slots[0] >= left_nritems)
3388 push_space += data_size;
3390 slot = path->slots[1];
3391 i = left_nritems - 1;
3393 item = btrfs_item_nr(left, i);
3395 if (!empty && push_items > 0) {
3396 if (path->slots[0] > i)
3398 if (path->slots[0] == i) {
3399 int space = btrfs_leaf_free_space(root, left);
3400 if (space + push_space * 2 > free_space)
3405 if (path->slots[0] == i)
3406 push_space += data_size;
3408 this_item_size = btrfs_item_size(left, item);
3409 if (this_item_size + sizeof(*item) + push_space > free_space)
3413 push_space += this_item_size + sizeof(*item);
3419 if (push_items == 0)
3422 WARN_ON(!empty && push_items == left_nritems);
3424 /* push left to right */
3425 right_nritems = btrfs_header_nritems(right);
3427 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3428 push_space -= leaf_data_end(root, left);
3430 /* make room in the right data area */
3431 data_end = leaf_data_end(root, right);
3432 memmove_extent_buffer(right,
3433 btrfs_leaf_data(right) + data_end - push_space,
3434 btrfs_leaf_data(right) + data_end,
3435 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3437 /* copy from the left data area */
3438 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3439 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3440 btrfs_leaf_data(left) + leaf_data_end(root, left),
3443 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3444 btrfs_item_nr_offset(0),
3445 right_nritems * sizeof(struct btrfs_item));
3447 /* copy the items from left to right */
3448 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3449 btrfs_item_nr_offset(left_nritems - push_items),
3450 push_items * sizeof(struct btrfs_item));
3452 /* update the item pointers */
3453 right_nritems += push_items;
3454 btrfs_set_header_nritems(right, right_nritems);
3455 push_space = BTRFS_LEAF_DATA_SIZE(root);
3456 for (i = 0; i < right_nritems; i++) {
3457 item = btrfs_item_nr(right, i);
3458 push_space -= btrfs_token_item_size(right, item, &token);
3459 btrfs_set_token_item_offset(right, item, push_space, &token);
3462 left_nritems -= push_items;
3463 btrfs_set_header_nritems(left, left_nritems);
3466 btrfs_mark_buffer_dirty(left);
3468 clean_tree_block(trans, root, left);
3470 btrfs_mark_buffer_dirty(right);
3472 btrfs_item_key(right, &disk_key, 0);
3473 btrfs_set_node_key(upper, &disk_key, slot + 1);
3474 btrfs_mark_buffer_dirty(upper);
3476 /* then fixup the leaf pointer in the path */
3477 if (path->slots[0] >= left_nritems) {
3478 path->slots[0] -= left_nritems;
3479 if (btrfs_header_nritems(path->nodes[0]) == 0)
3480 clean_tree_block(trans, root, path->nodes[0]);
3481 btrfs_tree_unlock(path->nodes[0]);
3482 free_extent_buffer(path->nodes[0]);
3483 path->nodes[0] = right;
3484 path->slots[1] += 1;
3486 btrfs_tree_unlock(right);
3487 free_extent_buffer(right);
3492 btrfs_tree_unlock(right);
3493 free_extent_buffer(right);
3498 * push some data in the path leaf to the right, trying to free up at
3499 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3501 * returns 1 if the push failed because the other node didn't have enough
3502 * room, 0 if everything worked out and < 0 if there were major errors.
3504 * this will push starting from min_slot to the end of the leaf. It won't
3505 * push any slot lower than min_slot
3507 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3508 *root, struct btrfs_path *path,
3509 int min_data_size, int data_size,
3510 int empty, u32 min_slot)
3512 struct extent_buffer *left = path->nodes[0];
3513 struct extent_buffer *right;
3514 struct extent_buffer *upper;
3520 if (!path->nodes[1])
3523 slot = path->slots[1];
3524 upper = path->nodes[1];
3525 if (slot >= btrfs_header_nritems(upper) - 1)
3528 btrfs_assert_tree_locked(path->nodes[1]);
3530 right = read_node_slot(root, upper, slot + 1);
3534 btrfs_tree_lock(right);
3535 btrfs_set_lock_blocking(right);
3537 free_space = btrfs_leaf_free_space(root, right);
3538 if (free_space < data_size)
3541 /* cow and double check */
3542 ret = btrfs_cow_block(trans, root, right, upper,
3547 free_space = btrfs_leaf_free_space(root, right);
3548 if (free_space < data_size)
3551 left_nritems = btrfs_header_nritems(left);
3552 if (left_nritems == 0)
3555 return __push_leaf_right(trans, root, path, min_data_size, empty,
3556 right, free_space, left_nritems, min_slot);
3558 btrfs_tree_unlock(right);
3559 free_extent_buffer(right);
3564 * push some data in the path leaf to the left, trying to free up at
3565 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3567 * max_slot can put a limit on how far into the leaf we'll push items. The
3568 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the pushes.
3571 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3572 struct btrfs_root *root,
3573 struct btrfs_path *path, int data_size,
3574 int empty, struct extent_buffer *left,
3575 int free_space, u32 right_nritems,
3578 struct btrfs_disk_key disk_key;
3579 struct extent_buffer *right = path->nodes[0];
3583 struct btrfs_item *item;
3584 u32 old_left_nritems;
3588 u32 old_left_item_size;
3589 struct btrfs_map_token token;
3591 btrfs_init_map_token(&token);
3594 nr = min(right_nritems, max_slot);
3596 nr = min(right_nritems - 1, max_slot);
3598 for (i = 0; i < nr; i++) {
3599 item = btrfs_item_nr(right, i);
3601 if (!empty && push_items > 0) {
3602 if (path->slots[0] < i)
3604 if (path->slots[0] == i) {
3605 int space = btrfs_leaf_free_space(root, right);
3606 if (space + push_space * 2 > free_space)
3611 if (path->slots[0] == i)
3612 push_space += data_size;
3614 this_item_size = btrfs_item_size(right, item);
3615 if (this_item_size + sizeof(*item) + push_space > free_space)
3619 push_space += this_item_size + sizeof(*item);
3622 if (push_items == 0) {
3626 if (!empty && push_items == btrfs_header_nritems(right))
3629 /* push data from right to left */
3630 copy_extent_buffer(left, right,
3631 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3632 btrfs_item_nr_offset(0),
3633 push_items * sizeof(struct btrfs_item));
3635 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3636 btrfs_item_offset_nr(right, push_items - 1);
3638 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3639 leaf_data_end(root, left) - push_space,
3640 btrfs_leaf_data(right) +
3641 btrfs_item_offset_nr(right, push_items - 1),
3643 old_left_nritems = btrfs_header_nritems(left);
3644 BUG_ON(old_left_nritems <= 0);
3646 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3647 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3650 item = btrfs_item_nr(left, i);
3652 ioff = btrfs_token_item_offset(left, item, &token);
3653 btrfs_set_token_item_offset(left, item,
3654 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3657 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3659 /* fixup right node */
3660 if (push_items > right_nritems)
3661 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3664 if (push_items < right_nritems) {
3665 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3666 leaf_data_end(root, right);
3667 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3668 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3669 btrfs_leaf_data(right) +
3670 leaf_data_end(root, right), push_space);
3672 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3673 btrfs_item_nr_offset(push_items),
3674 (btrfs_header_nritems(right) - push_items) *
3675 sizeof(struct btrfs_item));
3677 right_nritems -= push_items;
3678 btrfs_set_header_nritems(right, right_nritems);
3679 push_space = BTRFS_LEAF_DATA_SIZE(root);
3680 for (i = 0; i < right_nritems; i++) {
3681 item = btrfs_item_nr(right, i);
3683 push_space = push_space - btrfs_token_item_size(right,
3685 btrfs_set_token_item_offset(right, item, push_space, &token);
3688 btrfs_mark_buffer_dirty(left);
3690 btrfs_mark_buffer_dirty(right);
3692 clean_tree_block(trans, root, right);
3694 btrfs_item_key(right, &disk_key, 0);
3695 fixup_low_keys(trans, root, path, &disk_key, 1);
3697 /* then fixup the leaf pointer in the path */
3698 if (path->slots[0] < push_items) {
3699 path->slots[0] += old_left_nritems;
3700 btrfs_tree_unlock(path->nodes[0]);
3701 free_extent_buffer(path->nodes[0]);
3702 path->nodes[0] = left;
3703 path->slots[1] -= 1;
3705 btrfs_tree_unlock(left);
3706 free_extent_buffer(left);
3707 path->slots[0] -= push_items;
3709 BUG_ON(path->slots[0] < 0);
3712 btrfs_tree_unlock(left);
3713 free_extent_buffer(left);
3718 * push some data in the path leaf to the left, trying to free up at
3719 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3721 * max_slot can put a limit on how far into the leaf we'll push items. The
3722 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3725 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3726 *root, struct btrfs_path *path, int min_data_size,
3727 int data_size, int empty, u32 max_slot)
3729 struct extent_buffer *right = path->nodes[0];
3730 struct extent_buffer *left;
3736 slot = path->slots[1];
3739 if (!path->nodes[1])
3742 right_nritems = btrfs_header_nritems(right);
3743 if (right_nritems == 0)
3746 btrfs_assert_tree_locked(path->nodes[1]);
3748 left = read_node_slot(root, path->nodes[1], slot - 1);
3752 btrfs_tree_lock(left);
3753 btrfs_set_lock_blocking(left);
3755 free_space = btrfs_leaf_free_space(root, left);
3756 if (free_space < data_size) {
3761 /* cow and double check */
3762 ret = btrfs_cow_block(trans, root, left,
3763 path->nodes[1], slot - 1, &left);
3765 /* we hit -ENOSPC, but it isn't fatal here */
3771 free_space = btrfs_leaf_free_space(root, left);
3772 if (free_space < data_size) {
3777 return __push_leaf_left(trans, root, path, min_data_size,
3778 empty, left, free_space, right_nritems,
3781 btrfs_tree_unlock(left);
3782 free_extent_buffer(left);
3787 * split the path's leaf in two, making sure there is at least data_size
3788 * available for the resulting leaf level of the path.
3790 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3791 struct btrfs_root *root,
3792 struct btrfs_path *path,
3793 struct extent_buffer *l,
3794 struct extent_buffer *right,
3795 int slot, int mid, int nritems)
3800 struct btrfs_disk_key disk_key;
3801 struct btrfs_map_token token;
3803 btrfs_init_map_token(&token);
3805 nritems = nritems - mid;
3806 btrfs_set_header_nritems(right, nritems);
3807 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3809 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3810 btrfs_item_nr_offset(mid),
3811 nritems * sizeof(struct btrfs_item));
3813 copy_extent_buffer(right, l,
3814 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3815 data_copy_size, btrfs_leaf_data(l) +
3816 leaf_data_end(root, l), data_copy_size);
3818 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3819 btrfs_item_end_nr(l, mid);
3821 for (i = 0; i < nritems; i++) {
3822 struct btrfs_item *item = btrfs_item_nr(right, i);
3825 ioff = btrfs_token_item_offset(right, item, &token);
3826 btrfs_set_token_item_offset(right, item,
3827 ioff + rt_data_off, &token);
3830 btrfs_set_header_nritems(l, mid);
3831 btrfs_item_key(right, &disk_key, 0);
3832 insert_ptr(trans, root, path, &disk_key, right->start,
3833 path->slots[1] + 1, 1);
3835 btrfs_mark_buffer_dirty(right);
3836 btrfs_mark_buffer_dirty(l);
3837 BUG_ON(path->slots[0] != slot);
3840 btrfs_tree_unlock(path->nodes[0]);
3841 free_extent_buffer(path->nodes[0]);
3842 path->nodes[0] = right;
3843 path->slots[0] -= mid;
3844 path->slots[1] += 1;
3846 btrfs_tree_unlock(right);
3847 free_extent_buffer(right);
3850 BUG_ON(path->slots[0] < 0);
3854 * double splits happen when we need to insert a big item in the middle
3855 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3856 * leaf: [ slots 0 - N ] [ our target ] [ N + 1 - total in leaf ]
3859 * We avoid this by trying to push the items on either side of our target
3860 * into the adjacent leaves. If all goes well we can avoid the double split completely.
3863 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3864 struct btrfs_root *root,
3865 struct btrfs_path *path,
3873 slot = path->slots[0];
3876 * try to push all the items after our slot into the next leaf.
3879 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3886 nritems = btrfs_header_nritems(path->nodes[0]);
3888 * our goal is to get our slot at the start or end of a leaf. If
3889 * we've done so we're done
3891 if (path->slots[0] == 0 || path->slots[0] == nritems)
3894 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3897 /* try to push all the items before our slot into the next leaf */
3898 slot = path->slots[0];
3899 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3912 * split the path's leaf in two, making sure there is at least data_size
3913 * available for the resulting leaf level of the path.
3915 * returns 0 if all went well and < 0 on failure.
3917 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3918 struct btrfs_root *root,
3919 struct btrfs_key *ins_key,
3920 struct btrfs_path *path, int data_size,
3923 struct btrfs_disk_key disk_key;
3924 struct extent_buffer *l;
3928 struct extent_buffer *right;
3932 int num_doubles = 0;
3933 int tried_avoid_double = 0;
3936 slot = path->slots[0];
3937 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3938 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3941 /* first try to make some room by pushing left and right */
3943 wret = push_leaf_right(trans, root, path, data_size,
3948 wret = push_leaf_left(trans, root, path, data_size,
3949 data_size, 0, (u32)-1);
3955 /* did the pushes work? */
3956 if (btrfs_leaf_free_space(root, l) >= data_size)
3960 if (!path->nodes[1]) {
3961 ret = insert_new_root(trans, root, path, 1, 1);
3968 slot = path->slots[0];
3969 nritems = btrfs_header_nritems(l);
3970 mid = (nritems + 1) / 2;
3974 leaf_space_used(l, mid, nritems - mid) + data_size >
3975 BTRFS_LEAF_DATA_SIZE(root)) {
3976 if (slot >= nritems) {
3980 if (mid != nritems &&
3981 leaf_space_used(l, mid, nritems - mid) +
3982 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3983 if (data_size && !tried_avoid_double)
3984 goto push_for_double;
3990 if (leaf_space_used(l, 0, mid) + data_size >
3991 BTRFS_LEAF_DATA_SIZE(root)) {
3992 if (!extend && data_size && slot == 0) {
3994 } else if ((extend || !data_size) && slot == 0) {
3998 if (mid != nritems &&
3999 leaf_space_used(l, mid, nritems - mid) +
4000 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4001 if (data_size && !tried_avoid_double)
4002 goto push_for_double;
4010 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4012 btrfs_item_key(l, &disk_key, mid);
4014 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4015 root->root_key.objectid,
4016 &disk_key, 0, l->start, 0);
4018 return PTR_ERR(right);
4020 root_add_used(root, root->leafsize);
4022 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4023 btrfs_set_header_bytenr(right, right->start);
4024 btrfs_set_header_generation(right, trans->transid);
4025 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4026 btrfs_set_header_owner(right, root->root_key.objectid);
4027 btrfs_set_header_level(right, 0);
4028 write_extent_buffer(right, root->fs_info->fsid,
4029 (unsigned long)btrfs_header_fsid(right),
4032 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4033 (unsigned long)btrfs_header_chunk_tree_uuid(right),
4038 btrfs_set_header_nritems(right, 0);
4039 insert_ptr(trans, root, path, &disk_key, right->start,
4040 path->slots[1] + 1, 1);
4041 btrfs_tree_unlock(path->nodes[0]);
4042 free_extent_buffer(path->nodes[0]);
4043 path->nodes[0] = right;
4045 path->slots[1] += 1;
4047 btrfs_set_header_nritems(right, 0);
4048 insert_ptr(trans, root, path, &disk_key, right->start,
4050 btrfs_tree_unlock(path->nodes[0]);
4051 free_extent_buffer(path->nodes[0]);
4052 path->nodes[0] = right;
4054 if (path->slots[1] == 0)
4055 fixup_low_keys(trans, root, path,
4058 btrfs_mark_buffer_dirty(right);
4062 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4065 BUG_ON(num_doubles != 0);
4073 push_for_double_split(trans, root, path, data_size);
4074 tried_avoid_double = 1;
4075 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4080 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4081 struct btrfs_root *root,
4082 struct btrfs_path *path, int ins_len)
4084 struct btrfs_key key;
4085 struct extent_buffer *leaf;
4086 struct btrfs_file_extent_item *fi;
4091 leaf = path->nodes[0];
4092 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4094 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4095 key.type != BTRFS_EXTENT_CSUM_KEY);
4097 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4100 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4101 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4102 fi = btrfs_item_ptr(leaf, path->slots[0],
4103 struct btrfs_file_extent_item);
4104 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4106 btrfs_release_path(path);
4108 path->keep_locks = 1;
4109 path->search_for_split = 1;
4110 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4111 path->search_for_split = 0;
4116 leaf = path->nodes[0];
4117 /* if our item isn't there or got smaller, return now */
4118 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4121 /* the leaf has changed, it now has room. return now */
4122 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4125 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4126 fi = btrfs_item_ptr(leaf, path->slots[0],
4127 struct btrfs_file_extent_item);
4128 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4132 btrfs_set_path_blocking(path);
4133 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4137 path->keep_locks = 0;
4138 btrfs_unlock_up_safe(path, 1);
4141 path->keep_locks = 0;
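/*
 * In short (sketch): on success the path again points at the original
 * item and its leaf now has at least ins_len bytes free; the caller
 * must reload path->nodes[0] afterwards, since the re-search above may
 * have landed in a different leaf.
 */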
4145 static noinline int split_item(struct btrfs_trans_handle *trans,
4146 struct btrfs_root *root,
4147 struct btrfs_path *path,
4148 struct btrfs_key *new_key,
4149 unsigned long split_offset)
4151 struct extent_buffer *leaf;
4152 struct btrfs_item *item;
4153 struct btrfs_item *new_item;
4159 struct btrfs_disk_key disk_key;
4161 leaf = path->nodes[0];
4162 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4164 btrfs_set_path_blocking(path);
4166 item = btrfs_item_nr(leaf, path->slots[0]);
4167 orig_offset = btrfs_item_offset(leaf, item);
4168 item_size = btrfs_item_size(leaf, item);
4170 buf = kmalloc(item_size, GFP_NOFS);
4174 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4175 path->slots[0]), item_size);
4177 slot = path->slots[0] + 1;
4178 nritems = btrfs_header_nritems(leaf);
4179 if (slot != nritems) {
4180 /* shift the items */
4181 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4182 btrfs_item_nr_offset(slot),
4183 (nritems - slot) * sizeof(struct btrfs_item));
4186 btrfs_cpu_key_to_disk(&disk_key, new_key);
4187 btrfs_set_item_key(leaf, &disk_key, slot);
4189 new_item = btrfs_item_nr(leaf, slot);
4191 btrfs_set_item_offset(leaf, new_item, orig_offset);
4192 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4194 btrfs_set_item_offset(leaf, item,
4195 orig_offset + item_size - split_offset);
4196 btrfs_set_item_size(leaf, item, split_offset);
4198 btrfs_set_header_nritems(leaf, nritems + 1);
4200 /* write the data for the start of the original item */
4201 write_extent_buffer(leaf, buf,
4202 btrfs_item_ptr_offset(leaf, path->slots[0]),
4205 /* write the data for the new item */
4206 write_extent_buffer(leaf, buf + split_offset,
4207 btrfs_item_ptr_offset(leaf, slot),
4208 item_size - split_offset);
4209 btrfs_mark_buffer_dirty(leaf);
4211 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4217 * This function splits a single item into two items,
4218 * giving 'new_key' to the new item and splitting the
4219 * old one at split_offset (from the start of the item).
4221 * The path may be released by this operation. After
4222 * the split, the path is pointing to the old item. The
4223 * new item is going to be in the same node as the old one.
4225 * Note, the item being split must be small enough to live alone on
4226 * a tree block with room for one extra struct btrfs_item
4228 * This allows us to split the item in place, keeping a lock on the
4229 * leaf the entire time.
4231 int btrfs_split_item(struct btrfs_trans_handle *trans,
4232 struct btrfs_root *root,
4233 struct btrfs_path *path,
4234 struct btrfs_key *new_key,
4235 unsigned long split_offset)
4238 ret = setup_leaf_for_split(trans, root, path,
4239 sizeof(struct btrfs_item));
4243 ret = split_item(trans, root, path, new_key, split_offset);
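/*
 * Usage sketch (field values are placeholders): splitting an extent
 * csum item so the checksums past some byte boundary get their own key:
 *
 *	new_key.objectid = key.objectid;
 *	new_key.type = BTRFS_EXTENT_CSUM_KEY;
 *	new_key.offset = key.offset + bytes;
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 */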
4248 * This function duplicates an item, giving 'new_key' to the new item.
4249 * It guarantees both items live in the same tree leaf and the new item
4250 * is contiguous with the original item.
4252 * This allows us to split file extent in place, keeping a lock on the
4253 * leaf the entire time.
4255 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4256 struct btrfs_root *root,
4257 struct btrfs_path *path,
4258 struct btrfs_key *new_key)
4260 struct extent_buffer *leaf;
4264 leaf = path->nodes[0];
4265 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4266 ret = setup_leaf_for_split(trans, root, path,
4267 item_size + sizeof(struct btrfs_item));
4272 setup_items_for_insert(trans, root, path, new_key, &item_size,
4273 item_size, item_size +
4274 sizeof(struct btrfs_item), 1);
4275 leaf = path->nodes[0];
4276 memcpy_extent_buffer(leaf,
4277 btrfs_item_ptr_offset(leaf, path->slots[0]),
4278 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4284 * make the item pointed to by the path smaller. new_size indicates
4285 * how small to make it, and from_end tells us if we just chop bytes
4286 * off the end of the item or if we shift the item to chop bytes off the front.
4289 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4290 struct btrfs_root *root,
4291 struct btrfs_path *path,
4292 u32 new_size, int from_end)
4295 struct extent_buffer *leaf;
4296 struct btrfs_item *item;
4298 unsigned int data_end;
4299 unsigned int old_data_start;
4300 unsigned int old_size;
4301 unsigned int size_diff;
4303 struct btrfs_map_token token;
4305 btrfs_init_map_token(&token);
4307 leaf = path->nodes[0];
4308 slot = path->slots[0];
4310 old_size = btrfs_item_size_nr(leaf, slot);
4311 if (old_size == new_size)
4314 nritems = btrfs_header_nritems(leaf);
4315 data_end = leaf_data_end(root, leaf);
4317 old_data_start = btrfs_item_offset_nr(leaf, slot);
4319 size_diff = old_size - new_size;
4322 BUG_ON(slot >= nritems);
4325 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4327 /* first correct the data pointers */
4328 for (i = slot; i < nritems; i++) {
4330 item = btrfs_item_nr(leaf, i);
4332 ioff = btrfs_token_item_offset(leaf, item, &token);
4333 btrfs_set_token_item_offset(leaf, item,
4334 ioff + size_diff, &token);
4337 /* shift the data */
4339 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4340 data_end + size_diff, btrfs_leaf_data(leaf) +
4341 data_end, old_data_start + new_size - data_end);
4343 struct btrfs_disk_key disk_key;
4346 btrfs_item_key(leaf, &disk_key, slot);
4348 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4350 struct btrfs_file_extent_item *fi;
4352 fi = btrfs_item_ptr(leaf, slot,
4353 struct btrfs_file_extent_item);
4354 fi = (struct btrfs_file_extent_item *)(
4355 (unsigned long)fi - size_diff);
4357 if (btrfs_file_extent_type(leaf, fi) ==
4358 BTRFS_FILE_EXTENT_INLINE) {
4359 ptr = btrfs_item_ptr_offset(leaf, slot);
4360 memmove_extent_buffer(leaf, ptr,
4362 offsetof(struct btrfs_file_extent_item,
4367 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4368 data_end + size_diff, btrfs_leaf_data(leaf) +
4369 data_end, old_data_start - data_end);
4371 offset = btrfs_disk_key_offset(&disk_key);
4372 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4373 btrfs_set_item_key(leaf, &disk_key, slot);
4375 fixup_low_keys(trans, root, path, &disk_key, 1);
4378 item = btrfs_item_nr(leaf, slot);
4379 btrfs_set_item_size(leaf, item, new_size);
4380 btrfs_mark_buffer_dirty(leaf);
4382 if (btrfs_leaf_free_space(root, leaf) < 0) {
4383 btrfs_print_leaf(root, leaf);
4389 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
4391 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4392 struct btrfs_root *root, struct btrfs_path *path,
4396 struct extent_buffer *leaf;
4397 struct btrfs_item *item;
4399 unsigned int data_end;
4400 unsigned int old_data;
4401 unsigned int old_size;
4403 struct btrfs_map_token token;
4405 btrfs_init_map_token(&token);
4407 leaf = path->nodes[0];
4409 nritems = btrfs_header_nritems(leaf);
4410 data_end = leaf_data_end(root, leaf);
4412 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4413 btrfs_print_leaf(root, leaf);
4416 slot = path->slots[0];
4417 old_data = btrfs_item_end_nr(leaf, slot);
4420 if (slot >= nritems) {
4421 btrfs_print_leaf(root, leaf);
4422 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4428 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4430 /* first correct the data pointers */
4431 for (i = slot; i < nritems; i++) {
4433 item = btrfs_item_nr(leaf, i);
4435 ioff = btrfs_token_item_offset(leaf, item, &token);
4436 btrfs_set_token_item_offset(leaf, item,
4437 ioff - data_size, &token);
4440 /* shift the data */
4441 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4442 data_end - data_size, btrfs_leaf_data(leaf) +
4443 data_end, old_data - data_end);
4445 data_end = old_data;
4446 old_size = btrfs_item_size_nr(leaf, slot);
4447 item = btrfs_item_nr(leaf, slot);
4448 btrfs_set_item_size(leaf, item, old_size + data_size);
4449 btrfs_mark_buffer_dirty(leaf);
4451 if (btrfs_leaf_free_space(root, leaf) < 0) {
4452 btrfs_print_leaf(root, leaf);
4458 * this is a helper for btrfs_insert_empty_items, the main goal here is
4459 * to save stack depth by doing the bulk of the work in a function
4460 * that doesn't call btrfs_search_slot
4462 void setup_items_for_insert(struct btrfs_trans_handle *trans,
4463 struct btrfs_root *root, struct btrfs_path *path,
4464 struct btrfs_key *cpu_key, u32 *data_size,
4465 u32 total_data, u32 total_size, int nr)
4467 struct btrfs_item *item;
4470 unsigned int data_end;
4471 struct btrfs_disk_key disk_key;
4472 struct extent_buffer *leaf;
4474 struct btrfs_map_token token;
4476 btrfs_init_map_token(&token);
4478 leaf = path->nodes[0];
4479 slot = path->slots[0];
4481 nritems = btrfs_header_nritems(leaf);
4482 data_end = leaf_data_end(root, leaf);
4484 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4485 btrfs_print_leaf(root, leaf);
4486 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4487 total_size, btrfs_leaf_free_space(root, leaf));
4491 if (slot != nritems) {
4492 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4494 if (old_data < data_end) {
4495 btrfs_print_leaf(root, leaf);
4496 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4497 slot, old_data, data_end);
4501 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4503 /* first correct the data pointers */
4504 for (i = slot; i < nritems; i++) {
4507 item = btrfs_item_nr(leaf, i);
4508 ioff = btrfs_token_item_offset(leaf, item, &token);
4509 btrfs_set_token_item_offset(leaf, item,
4510 ioff - total_data, &token);
4512 /* shift the items */
4513 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4514 btrfs_item_nr_offset(slot),
4515 (nritems - slot) * sizeof(struct btrfs_item));
4517 /* shift the data */
4518 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4519 data_end - total_data, btrfs_leaf_data(leaf) +
4520 data_end, old_data - data_end);
4521 data_end = old_data;
4524 /* setup the item for the new data */
4525 for (i = 0; i < nr; i++) {
4526 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4527 btrfs_set_item_key(leaf, &disk_key, slot + i);
4528 item = btrfs_item_nr(leaf, slot + i);
4529 btrfs_set_token_item_offset(leaf, item,
4530 data_end - data_size[i], &token);
4531 data_end -= data_size[i];
4532 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4535 btrfs_set_header_nritems(leaf, nritems + nr);
4538 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4539 fixup_low_keys(trans, root, path, &disk_key, 1);
4541 btrfs_unlock_up_safe(path, 1);
4542 btrfs_mark_buffer_dirty(leaf);
4544 if (btrfs_leaf_free_space(root, leaf) < 0) {
4545 btrfs_print_leaf(root, leaf);
4551 * Given a key and some data, insert items into the tree.
4552 * This does all the path init required, making room in the tree if needed.
4554 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4555 struct btrfs_root *root,
4556 struct btrfs_path *path,
4557 struct btrfs_key *cpu_key, u32 *data_size,
4566 for (i = 0; i < nr; i++)
4567 total_data += data_size[i];
4569 total_size = total_data + (nr * sizeof(struct btrfs_item));
4570 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4576 slot = path->slots[0];
4579 setup_items_for_insert(trans, root, path, cpu_key, data_size,
4580 total_data, total_size, nr);
4585 * Given a key and some data, insert an item into the tree.
4586 * This does all the path init required, making room in the tree if needed.
4588 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4589 *root, struct btrfs_key *cpu_key, void *data, u32
4593 struct btrfs_path *path;
4594 struct extent_buffer *leaf;
4597 path = btrfs_alloc_path();
4600 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4602 leaf = path->nodes[0];
4603 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4604 write_extent_buffer(leaf, data, ptr, data_size);
4605 btrfs_mark_buffer_dirty(leaf);
4607 btrfs_free_path(path);
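/*
 * Usage sketch: inserting one item and its payload in a single call;
 * 'data', 'data_len' and the key are placeholders filled in by the
 * caller:
 *
 *	ret = btrfs_insert_item(trans, root, &key, data, data_len);
 */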
4612 * delete the pointer from a given node.
4614 * the tree should have been previously balanced so the deletion does not empty a node.
4617 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4618 struct btrfs_path *path, int level, int slot)
4620 struct extent_buffer *parent = path->nodes[level];
4624 nritems = btrfs_header_nritems(parent);
4625 if (slot != nritems - 1) {
4627 tree_mod_log_eb_move(root->fs_info, parent, slot,
4628 slot + 1, nritems - slot - 1);
4629 memmove_extent_buffer(parent,
4630 btrfs_node_key_ptr_offset(slot),
4631 btrfs_node_key_ptr_offset(slot + 1),
4632 sizeof(struct btrfs_key_ptr) *
4633 (nritems - slot - 1));
4635 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4636 MOD_LOG_KEY_REMOVE);
4641 btrfs_set_header_nritems(parent, nritems);
4642 if (nritems == 0 && parent == root->node) {
4643 BUG_ON(btrfs_header_level(root->node) != 1);
4644 /* just turn the root into a leaf and break */
4645 btrfs_set_header_level(root->node, 0);
4646 } else if (slot == 0) {
4647 struct btrfs_disk_key disk_key;
4649 btrfs_node_key(parent, &disk_key, 0);
4650 fixup_low_keys(trans, root, path, &disk_key, level + 1);
4652 btrfs_mark_buffer_dirty(parent);
4656 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4659 * This deletes the pointer in path->nodes[1] and frees the leaf
4660 * block extent. zero is returned if it all worked out, < 0 otherwise.
4662 * The path must have already been setup for deleting the leaf, including
4663 * all the proper balancing. path->nodes[1] must be locked.
4665 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4666 struct btrfs_root *root,
4667 struct btrfs_path *path,
4668 struct extent_buffer *leaf)
4670 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4671 del_ptr(trans, root, path, 1, path->slots[1]);
4674 * btrfs_free_extent is expensive, we want to make sure we
4675 * aren't holding any locks when we call it
4677 btrfs_unlock_up_safe(path, 0);
4679 root_sub_used(root, leaf->len);
4681 extent_buffer_get(leaf);
4682 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4683 free_extent_buffer_stale(leaf);
4686 * delete the item at the leaf level in path. If that empties
4687 * the leaf, remove it from the tree
4689 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4690 struct btrfs_path *path, int slot, int nr)
4692 struct extent_buffer *leaf;
4693 struct btrfs_item *item;
4700 struct btrfs_map_token token;
4702 btrfs_init_map_token(&token);
4704 leaf = path->nodes[0];
4705 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4707 for (i = 0; i < nr; i++)
4708 dsize += btrfs_item_size_nr(leaf, slot + i);
4710 nritems = btrfs_header_nritems(leaf);
4712 if (slot + nr != nritems) {
4713 int data_end = leaf_data_end(root, leaf);
4715 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4717 btrfs_leaf_data(leaf) + data_end,
4718 last_off - data_end);
4720 for (i = slot + nr; i < nritems; i++) {
4723 item = btrfs_item_nr(leaf, i);
4724 ioff = btrfs_token_item_offset(leaf, item, &token);
4725 btrfs_set_token_item_offset(leaf, item,
4726 ioff + dsize, &token);
4729 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4730 btrfs_item_nr_offset(slot + nr),
4731 sizeof(struct btrfs_item) *
4732 (nritems - slot - nr));
4734 btrfs_set_header_nritems(leaf, nritems - nr);
4737 /* delete the leaf if we've emptied it */
4739 if (leaf == root->node) {
4740 btrfs_set_header_level(leaf, 0);
4742 btrfs_set_path_blocking(path);
4743 clean_tree_block(trans, root, leaf);
4744 btrfs_del_leaf(trans, root, path, leaf);
4747 int used = leaf_space_used(leaf, 0, nritems);
4749 struct btrfs_disk_key disk_key;
4751 btrfs_item_key(leaf, &disk_key, 0);
4752 fixup_low_keys(trans, root, path, &disk_key, 1);
4755 /* delete the leaf if it is mostly empty */
4756 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4757 /* push_leaf_left fixes the path.
4758 * make sure the path still points to our leaf
4759 * for a possible call to del_ptr below
4761 slot = path->slots[1];
4762 extent_buffer_get(leaf);
4764 btrfs_set_path_blocking(path);
4765 wret = push_leaf_left(trans, root, path, 1, 1,
4767 if (wret < 0 && wret != -ENOSPC)
4770 if (path->nodes[0] == leaf &&
4771 btrfs_header_nritems(leaf)) {
4772 wret = push_leaf_right(trans, root, path, 1,
4774 if (wret < 0 && wret != -ENOSPC)
4778 if (btrfs_header_nritems(leaf) == 0) {
4779 path->slots[1] = slot;
4780 btrfs_del_leaf(trans, root, path, leaf);
4781 free_extent_buffer(leaf);
4784 /* if we're still in the path, make sure
4785 * we're dirty. Otherwise, one of the
4786 * push_leaf functions must have already
4787 * dirtied this buffer
4789 if (path->nodes[0] == leaf)
4790 btrfs_mark_buffer_dirty(leaf);
4791 free_extent_buffer(leaf);
4794 btrfs_mark_buffer_dirty(leaf);
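/*
 * Hedged usage sketch (editor's example, not part of the original file):
 * a typical caller looks the key up with btrfs_search_slot() using
 * ins_len == -1 so the search pre-balances the path for a deletion, then
 * deletes the slot it landed on. example_del_one_item() is hypothetical.
 */
#if 0
static int example_del_one_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len == -1 tells the search we intend to delete, cow == 1 */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key not present */

	btrfs_free_path(path);
	return ret;
}
#endif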
4801 * search the tree again to find a leaf with lesser keys
4802 * returns 0 if it found something or 1 if there are no lesser leaves.
4803 * returns < 0 on io errors.
4805 * This may release the path, and so you may lose any locks held at the time you call it.
4808 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4810 struct btrfs_key key;
4811 struct btrfs_disk_key found_key;
4814 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4818 else if (key.type > 0)
4820 else if (key.objectid > 0)
4825 btrfs_release_path(path);
4826 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4829 btrfs_item_key(path->nodes[0], &found_key, 0);
4830 ret = comp_keys(&found_key, &key);
4837 * A helper function to walk down the tree starting at min_key, and looking
4838 * for nodes or leaves that have a minimum transaction id.
4839 * This is used by the btree defrag code and tree logging.
4841 * This does not cow, but it does stuff the starting key it finds back
4842 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4843 * key and get a writable path.
4845 * This does lock as it descends, and path->keep_locks should be set
4846 * to 1 by the caller.
4848 * This honors path->lowest_level to prevent descent past a given level of the tree.
4851 * min_trans indicates the oldest transaction that you are interested
4852 * in walking through. Any nodes or leaves older than min_trans are
4853 * skipped over (without reading them).
4855 * returns zero if something useful was found, < 0 on error and 1 if there
4856 * was nothing in the tree that matched the search criteria.
4858 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4859 struct btrfs_key *max_key,
4860 struct btrfs_path *path,
4863 struct extent_buffer *cur;
4864 struct btrfs_key found_key;
4871 WARN_ON(!path->keep_locks);
4873 cur = btrfs_read_lock_root_node(root);
4874 level = btrfs_header_level(cur);
4875 WARN_ON(path->nodes[level]);
4876 path->nodes[level] = cur;
4877 path->locks[level] = BTRFS_READ_LOCK;
4879 if (btrfs_header_generation(cur) < min_trans) {
4884 nritems = btrfs_header_nritems(cur);
4885 level = btrfs_header_level(cur);
4886 sret = bin_search(cur, min_key, level, &slot);
4888 /* at the lowest level, we're done, setup the path and exit */
4889 if (level == path->lowest_level) {
4890 if (slot >= nritems)
4893 path->slots[level] = slot;
4894 btrfs_item_key_to_cpu(cur, &found_key, slot);
4897 if (sret && slot > 0)
4900 * check this node pointer against the min_trans parameter.
4901 * If it is too old, skip to the next one.
4903 while (slot < nritems) {
4907 blockptr = btrfs_node_blockptr(cur, slot);
4908 gen = btrfs_node_ptr_generation(cur, slot);
4909 if (gen < min_trans) {
4917 * we didn't find a candidate key in this node, walk forward
4918 * and find another one
4920 if (slot >= nritems) {
4921 path->slots[level] = slot;
4922 btrfs_set_path_blocking(path);
4923 sret = btrfs_find_next_key(root, path, min_key, level,
4926 btrfs_release_path(path);
4932 /* save our key so we can return it to the caller */
4933 btrfs_node_key_to_cpu(cur, &found_key, slot);
4934 path->slots[level] = slot;
4935 if (level == path->lowest_level) {
4937 unlock_up(path, level, 1, 0, NULL);
4940 btrfs_set_path_blocking(path);
4941 cur = read_node_slot(root, cur, slot);
4942 BUG_ON(!cur); /* -ENOMEM */
4944 btrfs_tree_read_lock(cur);
4946 path->locks[level - 1] = BTRFS_READ_LOCK;
4947 path->nodes[level - 1] = cur;
4948 unlock_up(path, level, 1, 0, NULL);
4949 btrfs_clear_path_blocking(path, NULL, 0);
4953 memcpy(min_key, &found_key, sizeof(found_key));
4954 btrfs_set_path_blocking(path);
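/*
 * Hedged usage sketch (editor's example, not part of the original file):
 * the declaration of btrfs_search_forward() above elides the tail of its
 * argument list, so the call below assumes, for illustration only, that
 * the list ends with min_trans. The loop restarts from the key the
 * function stuffs back into min_key, bumping it so the same item is not
 * returned twice; this mirrors how the defrag and tree-log callers
 * iterate. example_walk_newer_than() is hypothetical.
 */
#if 0
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { .objectid = 0, .type = 0, .offset = 0 };
	struct btrfs_key max_key = { .objectid = (u64)-1, .type = (u8)-1,
				     .offset = (u64)-1 };	/* no upper bound */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;	/* required, see the WARN_ON() above */

	while (1) {
		/* hypothetical trailing argument, see lead-in comment */
		ret = btrfs_search_forward(root, &min_key, &max_key, path,
					   min_trans);
		if (ret)	/* 1: nothing matched, < 0: error */
			break;

		/* ... process path->nodes[0] / path->slots[0] here ... */

		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;		/* key space exhausted (simplified) */
		min_key.offset++;	/* advance past the returned key */
	}

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
#endif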
4958 static void tree_move_down(struct btrfs_root *root,
4959 struct btrfs_path *path,
4960 int *level, int root_level)
4962 BUG_ON(*level == 0);
4963 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4964 path->slots[*level]);
4965 path->slots[*level - 1] = 0;
4969 static int tree_move_next_or_upnext(struct btrfs_root *root,
4970 struct btrfs_path *path,
4971 int *level, int root_level)
4975 nritems = btrfs_header_nritems(path->nodes[*level]);
4977 path->slots[*level]++;
4979 while (path->slots[*level] >= nritems) {
4980 if (*level == root_level)
4984 path->slots[*level] = 0;
4985 free_extent_buffer(path->nodes[*level]);
4986 path->nodes[*level] = NULL;
4988 path->slots[*level]++;
4990 nritems = btrfs_header_nritems(path->nodes[*level]);
4997 * Returns 1 if it had to move up and next. 0 is returned if it moved only next or down.
5000 static int tree_advance(struct btrfs_root *root,
5001 struct btrfs_path *path,
5002 int *level, int root_level,
5004 struct btrfs_key *key)
5008 if (*level == 0 || !allow_down) {
5009 ret = tree_move_next_or_upnext(root, path, level, root_level);
5011 tree_move_down(root, path, level, root_level);
5016 btrfs_item_key_to_cpu(path->nodes[*level], key,
5017 path->slots[*level]);
5019 btrfs_node_key_to_cpu(path->nodes[*level], key,
5020 path->slots[*level]);
5025 static int tree_compare_item(struct btrfs_root *left_root,
5026 struct btrfs_path *left_path,
5027 struct btrfs_path *right_path,
5032 unsigned long off1, off2;
5034 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5035 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5039 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5040 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5041 right_path->slots[0]);
5043 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5045 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5052 #define ADVANCE_ONLY_NEXT -1
5055 * This function compares two trees and calls the provided callback for
5056 * every changed/new/deleted item it finds.
5057 * If shared tree blocks are encountered, whole subtrees are skipped, making
5058 * the compare pretty fast on snapshotted subvolumes.
5060 * This currently works on commit roots only. As commit roots are read only,
5061 * we don't do any locking. The commit roots are protected with transactions.
5062 * Transactions are ended and rejoined when a commit is tried in between.
5064 * This function checks for modifications done to the trees while comparing.
5065 * If it detects a change, it aborts immediately.
5067 int btrfs_compare_trees(struct btrfs_root *left_root,
5068 struct btrfs_root *right_root,
5069 btrfs_changed_cb_t changed_cb, void *ctx)
5073 struct btrfs_trans_handle *trans = NULL;
5074 struct btrfs_path *left_path = NULL;
5075 struct btrfs_path *right_path = NULL;
5076 struct btrfs_key left_key;
5077 struct btrfs_key right_key;
5078 char *tmp_buf = NULL;
5079 int left_root_level;
5080 int right_root_level;
5083 int left_end_reached;
5084 int right_end_reached;
5089 u64 left_start_ctransid;
5090 u64 right_start_ctransid;
5093 left_path = btrfs_alloc_path();
5098 right_path = btrfs_alloc_path();
5104 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5110 left_path->search_commit_root = 1;
5111 left_path->skip_locking = 1;
5112 right_path->search_commit_root = 1;
5113 right_path->skip_locking = 1;
5115 spin_lock(&left_root->root_item_lock);
5116 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5117 spin_unlock(&left_root->root_item_lock);
5119 spin_lock(&right_root->root_item_lock);
5120 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5121 spin_unlock(&right_root->root_item_lock);
5123 trans = btrfs_join_transaction(left_root);
5124 if (IS_ERR(trans)) {
5125 ret = PTR_ERR(trans);
5131 * Strategy: Go to the first items of both trees. Then do
5133 * If both trees are at level 0
5134 * Compare keys of current items
5135 * If left < right treat left item as new, advance left tree
5137 * If left > right treat right item as deleted, advance right tree
5139 * If left == right do deep compare of items, treat as changed if
5140 * needed, advance both trees and repeat
5141 * If both trees are at the same level but not at level 0
5142 * Compare keys of current nodes/leaves
5143 * If left < right advance left tree and repeat
5144 * If left > right advance right tree and repeat
5145 * If left == right compare blockptrs of the next nodes/leaves
5146 * If they match advance both trees but stay at the same level
5148 * If they don't match advance both trees while allowing to go deeper and repeat
5150 * If tree levels are different
5151 * Advance the tree that needs it and repeat
5153 * Advancing a tree means:
5154 * If we are at level 0, try to go to the next slot. If that's not
5155 * possible, go one level up and repeat. Stop when we find a level
5156 * where we could go to the next slot. We may at this point be on a node or a leaf.
5159 * If we are not at level 0 and not on shared tree blocks, go one level deeper.
5162 * If we are not at level 0 and on shared tree blocks, go one slot to
5163 * the right if possible or go up and right.
5166 left_level = btrfs_header_level(left_root->commit_root);
5167 left_root_level = left_level;
5168 left_path->nodes[left_level] = left_root->commit_root;
5169 extent_buffer_get(left_path->nodes[left_level]);
5171 right_level = btrfs_header_level(right_root->commit_root);
5172 right_root_level = right_level;
5173 right_path->nodes[right_level] = right_root->commit_root;
5174 extent_buffer_get(right_path->nodes[right_level]);
5176 if (left_level == 0)
5177 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5178 &left_key, left_path->slots[left_level]);
5180 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5181 &left_key, left_path->slots[left_level]);
5182 if (right_level == 0)
5183 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5184 &right_key, right_path->slots[right_level]);
5186 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5187 &right_key, right_path->slots[right_level]);
5189 left_end_reached = right_end_reached = 0;
5190 advance_left = advance_right = 0;
5194 * We need to make sure the transaction does not get committed
5195 * while we do anything on commit roots. This means we need to
5196 * join and leave transactions for every item that we process.
5198 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5199 btrfs_release_path(left_path);
5200 btrfs_release_path(right_path);
5202 ret = btrfs_end_transaction(trans, left_root);
5207 /* now rejoin the transaction */
5209 trans = btrfs_join_transaction(left_root);
5210 if (IS_ERR(trans)) {
5211 ret = PTR_ERR(trans);
5216 spin_lock(&left_root->root_item_lock);
5217 ctransid = btrfs_root_ctransid(&left_root->root_item);
5218 spin_unlock(&left_root->root_item_lock);
5219 if (ctransid != left_start_ctransid)
5220 left_start_ctransid = 0;
5222 spin_lock(&right_root->root_item_lock);
5223 ctransid = btrfs_root_ctransid(&right_root->root_item);
5224 spin_unlock(&right_root->root_item_lock);
5225 if (ctransid != right_start_ctransid)
5226 right_start_ctransid = 0;
5228 if (!left_start_ctransid || !right_start_ctransid) {
5229 WARN(1, KERN_WARNING
5230 "btrfs: btrfs_compare_tree detected "
5231 "a change in one of the trees while "
5232 "iterating. This is probably a "
5239 * the commit root may have changed, so start again where we stopped
5242 left_path->lowest_level = left_level;
5243 right_path->lowest_level = right_level;
5244 ret = btrfs_search_slot(NULL, left_root,
5245 &left_key, left_path, 0, 0);
5248 ret = btrfs_search_slot(NULL, right_root,
5249 &right_key, right_path, 0, 0);
5254 if (advance_left && !left_end_reached) {
5255 ret = tree_advance(left_root, left_path, &left_level,
5257 advance_left != ADVANCE_ONLY_NEXT,
5260 left_end_reached = ADVANCE;
5263 if (advance_right && !right_end_reached) {
5264 ret = tree_advance(right_root, right_path, &right_level,
5266 advance_right != ADVANCE_ONLY_NEXT,
5269 right_end_reached = ADVANCE;
5273 if (left_end_reached && right_end_reached) {
5276 } else if (left_end_reached) {
5277 if (right_level == 0) {
5278 ret = changed_cb(left_root, right_root,
5279 left_path, right_path,
5281 BTRFS_COMPARE_TREE_DELETED,
5286 advance_right = ADVANCE;
5288 } else if (right_end_reached) {
5289 if (left_level == 0) {
5290 ret = changed_cb(left_root, right_root,
5291 left_path, right_path,
5293 BTRFS_COMPARE_TREE_NEW,
5298 advance_left = ADVANCE;
5302 if (left_level == 0 && right_level == 0) {
5303 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5305 ret = changed_cb(left_root, right_root,
5306 left_path, right_path,
5308 BTRFS_COMPARE_TREE_NEW,
5312 advance_left = ADVANCE;
5313 } else if (cmp > 0) {
5314 ret = changed_cb(left_root, right_root,
5315 left_path, right_path,
5317 BTRFS_COMPARE_TREE_DELETED,
5321 advance_right = ADVANCE;
5323 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5324 ret = tree_compare_item(left_root, left_path,
5325 right_path, tmp_buf);
5327 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5328 ret = changed_cb(left_root, right_root,
5329 left_path, right_path,
5331 BTRFS_COMPARE_TREE_CHANGED,
5336 advance_left = ADVANCE;
5337 advance_right = ADVANCE;
5339 } else if (left_level == right_level) {
5340 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5342 advance_left = ADVANCE;
5343 } else if (cmp > 0) {
5344 advance_right = ADVANCE;
5346 left_blockptr = btrfs_node_blockptr(
5347 left_path->nodes[left_level],
5348 left_path->slots[left_level]);
5349 right_blockptr = btrfs_node_blockptr(
5350 right_path->nodes[right_level],
5351 right_path->slots[right_level]);
5352 if (left_blockptr == right_blockptr) {
5354 * As we're on a shared block, don't
5355 * descend any deeper.
5357 advance_left = ADVANCE_ONLY_NEXT;
5358 advance_right = ADVANCE_ONLY_NEXT;
5360 advance_left = ADVANCE;
5361 advance_right = ADVANCE;
5364 } else if (left_level < right_level) {
5365 advance_right = ADVANCE;
5367 advance_left = ADVANCE;
5372 btrfs_free_path(left_path);
5373 btrfs_free_path(right_path);
5378 ret = btrfs_end_transaction(trans, left_root);
5380 btrfs_end_transaction(trans, left_root);
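/*
 * Hedged example (editor's sketch, not part of the original file): a
 * minimal btrfs_changed_cb_t implementation plus its call site. The
 * callback signature and the enum btrfs_compare_tree_result type name
 * are assumed from the ctree.h of this kernel generation; the constants
 * BTRFS_COMPARE_TREE_NEW/_DELETED/_CHANGED are the ones passed by
 * btrfs_compare_trees() above. example_count_changes() and struct
 * example_ctx are hypothetical.
 */
#if 0
struct example_ctx {
	u64 n_new, n_deleted, n_changed;
};

static int example_count_changes(struct btrfs_root *left_root,
				 struct btrfs_root *right_root,
				 struct btrfs_path *left_path,
				 struct btrfs_path *right_path,
				 struct btrfs_key *key,
				 enum btrfs_compare_tree_result result,
				 void *ctx)
{
	struct example_ctx *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->n_new++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->n_deleted++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->n_changed++;
		break;
	}
	return 0;	/* a non-zero return aborts the compare */
}

/*
 * invoked as:
 *	struct example_ctx stats = {};
 *	ret = btrfs_compare_trees(left_root, right_root,
 *				  example_count_changes, &stats);
 */
#endif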
5387 * this is similar to btrfs_next_leaf, but does not try to preserve
5388 * and fixup the path. It looks for and returns the next key in the
5389 * tree based on the current path and the min_trans parameters.
5391 * 0 is returned if another key is found, < 0 if there are any errors
5392 * and 1 is returned if there are no higher keys in the tree
5394 * path->keep_locks should be set to 1 on the search made before
5395 * calling this function.
5397 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5398 struct btrfs_key *key, int level, u64 min_trans)
5401 struct extent_buffer *c;
5403 WARN_ON(!path->keep_locks);
5404 while (level < BTRFS_MAX_LEVEL) {
5405 if (!path->nodes[level])
5408 slot = path->slots[level] + 1;
5409 c = path->nodes[level];
5411 if (slot >= btrfs_header_nritems(c)) {
5414 struct btrfs_key cur_key;
5415 if (level + 1 >= BTRFS_MAX_LEVEL ||
5416 !path->nodes[level + 1])
5419 if (path->locks[level + 1]) {
5424 slot = btrfs_header_nritems(c) - 1;
5426 btrfs_item_key_to_cpu(c, &cur_key, slot);
5428 btrfs_node_key_to_cpu(c, &cur_key, slot);
5430 orig_lowest = path->lowest_level;
5431 btrfs_release_path(path);
5432 path->lowest_level = level;
5433 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5435 path->lowest_level = orig_lowest;
5439 c = path->nodes[level];
5440 slot = path->slots[level];
5447 btrfs_item_key_to_cpu(c, key, slot);
5449 u64 gen = btrfs_node_ptr_generation(c, slot);
5451 if (gen < min_trans) {
5455 btrfs_node_key_to_cpu(c, key, slot);
5463 * search the tree again to find a leaf with greater keys
5464 * returns 0 if it found something or 1 if there are no greater leaves.
5465 * returns < 0 on io errors.
5467 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5469 return btrfs_next_old_leaf(root, path, 0);
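/*
 * Hedged usage sketch (editor's example, not part of the original file):
 * the canonical whole-tree walk pairs one btrfs_search_slot() with
 * btrfs_next_leaf() whenever the slot runs off the end of the current
 * leaf. example_walk_tree() is hypothetical.
 */
#if 0
static int example_walk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start at the smallest possible key */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* ... process the item at path->slots[0] here ... */

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
#endif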
5472 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5477 struct extent_buffer *c;
5478 struct extent_buffer *next;
5479 struct btrfs_key key;
5482 int old_spinning = path->leave_spinning;
5483 int next_rw_lock = 0;
5485 nritems = btrfs_header_nritems(path->nodes[0]);
5489 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5494 btrfs_release_path(path);
5496 path->keep_locks = 1;
5497 path->leave_spinning = 1;
5500 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5502 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5503 path->keep_locks = 0;
5508 nritems = btrfs_header_nritems(path->nodes[0]);
5510 * by releasing the path above we dropped all our locks. A balance
5511 * could have added more items next to the key that used to be
5512 * at the very end of the block. So, check again here and
5513 * advance the path if there are now more items available.
5515 if (nritems > 0 && path->slots[0] < nritems - 1) {
5522 while (level < BTRFS_MAX_LEVEL) {
5523 if (!path->nodes[level]) {
5528 slot = path->slots[level] + 1;
5529 c = path->nodes[level];
5530 if (slot >= btrfs_header_nritems(c)) {
5532 if (level == BTRFS_MAX_LEVEL) {
5540 btrfs_tree_unlock_rw(next, next_rw_lock);
5541 free_extent_buffer(next);
5545 next_rw_lock = path->locks[level];
5546 ret = read_block_for_search(NULL, root, path, &next, level,
5552 btrfs_release_path(path);
5556 if (!path->skip_locking) {
5557 ret = btrfs_try_tree_read_lock(next);
5558 if (!ret && time_seq) {
5560 * If we don't get the lock, we may be racing
5561 * with push_leaf_left, holding that lock while
5562 * itself waiting for the leaf we've currently
5563 * locked. To solve this situation, we give up
5564 * on our lock and cycle.
5566 free_extent_buffer(next);
5567 btrfs_release_path(path);
5572 btrfs_set_path_blocking(path);
5573 btrfs_tree_read_lock(next);
5574 btrfs_clear_path_blocking(path, next,
5577 next_rw_lock = BTRFS_READ_LOCK;
5581 path->slots[level] = slot;
5584 c = path->nodes[level];
5585 if (path->locks[level])
5586 btrfs_tree_unlock_rw(c, path->locks[level]);
5588 free_extent_buffer(c);
5589 path->nodes[level] = next;
5590 path->slots[level] = 0;
5591 if (!path->skip_locking)
5592 path->locks[level] = next_rw_lock;
5596 ret = read_block_for_search(NULL, root, path, &next, level,
5602 btrfs_release_path(path);
5606 if (!path->skip_locking) {
5607 ret = btrfs_try_tree_read_lock(next);
5609 btrfs_set_path_blocking(path);
5610 btrfs_tree_read_lock(next);
5611 btrfs_clear_path_blocking(path, next,
5614 next_rw_lock = BTRFS_READ_LOCK;
5619 unlock_up(path, 0, 1, 0, NULL);
5620 path->leave_spinning = old_spinning;
5622 btrfs_set_path_blocking(path);
5628 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5629 * searching until it gets past min_objectid or finds an item of 'type'
5631 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5633 int btrfs_previous_item(struct btrfs_root *root,
5634 struct btrfs_path *path, u64 min_objectid,
5637 struct btrfs_key found_key;
5638 struct extent_buffer *leaf;
5643 if (path->slots[0] == 0) {
5644 btrfs_set_path_blocking(path);
5645 ret = btrfs_prev_leaf(root, path);
5651 leaf = path->nodes[0];
5652 nritems = btrfs_header_nritems(leaf);
5655 if (path->slots[0] == nritems)
5658 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5659 if (found_key.objectid < min_objectid)
5661 if (found_key.type == type)
5663 if (found_key.objectid == min_objectid &&
5664 found_key.type < type)