2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
24 #include "transaction.h"
25 #include "print-tree.h"
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
41 struct btrfs_path *path, int level, int slot,
43 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
44 struct extent_buffer *eb);
45 struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
46 u32 blocksize, u64 parent_transid,
48 struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
49 u64 bytenr, u32 blocksize,
52 struct btrfs_path *btrfs_alloc_path(void)
54 struct btrfs_path *path;
55 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
60 * set all locked nodes in the path to blocking locks. This should
61 * be done before scheduling
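 * (other tasks busy-wait on spinning tree locks, so they should not be held
 * across anything that might sleep; converting the whole path to blocking
 * locks first avoids that)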
63 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
66 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
67 if (!p->nodes[i] || !p->locks[i])
69 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
70 if (p->locks[i] == BTRFS_READ_LOCK)
71 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
72 else if (p->locks[i] == BTRFS_WRITE_LOCK)
73 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
78 * reset all the locked nodes in the path to spinning locks.
80 * held is used to keep lockdep happy; when lockdep is enabled
81 * we set held to a blocking lock before we go around and
82 * retake all the spinlocks in the path. You can safely use NULL
85 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
86 struct extent_buffer *held, int held_rw)
90 #ifdef CONFIG_DEBUG_LOCK_ALLOC
91 /* lockdep really cares that we take all of these spinlocks
92 * in the right order. If any of the locks in the path are not
93 * currently blocking, it is going to complain. So, make really
94 * really sure by forcing the path to blocking before we clear
98 btrfs_set_lock_blocking_rw(held, held_rw);
99 if (held_rw == BTRFS_WRITE_LOCK)
100 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
101 else if (held_rw == BTRFS_READ_LOCK)
102 held_rw = BTRFS_READ_LOCK_BLOCKING;
104 btrfs_set_path_blocking(p);
107 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
108 if (p->nodes[i] && p->locks[i]) {
109 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
110 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
111 p->locks[i] = BTRFS_WRITE_LOCK;
112 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
113 p->locks[i] = BTRFS_READ_LOCK;
117 #ifdef CONFIG_DEBUG_LOCK_ALLOC
119 btrfs_clear_lock_blocking_rw(held, held_rw);
123 /* this also releases the path */
124 void btrfs_free_path(struct btrfs_path *p)
128 btrfs_release_path(p);
129 kmem_cache_free(btrfs_path_cachep, p);
133 * path release drops references on the extent buffers in the path
134 * and it drops any locks held by this path
136 * It is safe to call this on paths that hold no locks or extent buffers.
138 noinline void btrfs_release_path(struct btrfs_path *p)
142 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
147 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
150 free_extent_buffer(p->nodes[i]);
156 * safely gets a reference on the root node of a tree. A lock
157 * is not taken, so a concurrent writer may put a different node
158 * at the root of the tree. See btrfs_lock_root_node for the
161 * The extent buffer returned by this has a reference taken, so
162 * it won't disappear. It may stop being the root of the tree
163 * at any time because there are no locks held.
165 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
167 struct extent_buffer *eb;
171 eb = rcu_dereference(root->node);
174 * RCU really hurts here, we could free up the root node because
175 * it was cow'ed but we may not get the new root node yet so do
176 * the inc_not_zero dance and if it doesn't work then
177 * synchronize_rcu and try again.
179 if (atomic_inc_not_zero(&eb->refs)) {
189 /* loop around taking references on and locking the root node of the
190 * tree until you end up with a lock on the root. A locked buffer
191 * is returned, with a reference held.
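 * The loop is needed because the root may be cow'ed (and replaced) between
 * btrfs_root_node() taking its reference and the lock being acquired; if
 * root->node changed underneath us, unlock, drop the reference and retry.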
193 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
195 struct extent_buffer *eb;
198 eb = btrfs_root_node(root);
200 if (eb == root->node)
202 btrfs_tree_unlock(eb);
203 free_extent_buffer(eb);
208 /* loop around taking references on and locking the root node of the
209 * tree until you end up with a lock on the root. A locked buffer
210 * is returned, with a reference held.
212 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
214 struct extent_buffer *eb;
217 eb = btrfs_root_node(root);
218 btrfs_tree_read_lock(eb);
219 if (eb == root->node)
221 btrfs_tree_read_unlock(eb);
222 free_extent_buffer(eb);
227 /* cowonly roots (everything not a reference counted cow subvolume) just get
228 * put onto a simple dirty list. transaction.c walks this to make sure they
229 * get properly updated on disk.
231 static void add_root_to_dirty_list(struct btrfs_root *root)
233 spin_lock(&root->fs_info->trans_lock);
234 if (root->track_dirty && list_empty(&root->dirty_list)) {
235 list_add(&root->dirty_list,
236 &root->fs_info->dirty_cowonly_roots);
238 spin_unlock(&root->fs_info->trans_lock);
242 * used by snapshot creation to make a copy of a root for a tree with
243 * a given objectid. The buffer with the new root node is returned in
244 * cow_ret, and this func returns zero on success or a negative error code.
246 int btrfs_copy_root(struct btrfs_trans_handle *trans,
247 struct btrfs_root *root,
248 struct extent_buffer *buf,
249 struct extent_buffer **cow_ret, u64 new_root_objectid)
251 struct extent_buffer *cow;
254 struct btrfs_disk_key disk_key;
256 WARN_ON(root->ref_cows && trans->transid !=
257 root->fs_info->running_transaction->transid);
258 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
260 level = btrfs_header_level(buf);
262 btrfs_item_key(buf, &disk_key, 0);
264 btrfs_node_key(buf, &disk_key, 0);
266 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
267 new_root_objectid, &disk_key, level,
272 copy_extent_buffer(cow, buf, 0, 0, cow->len);
273 btrfs_set_header_bytenr(cow, cow->start);
274 btrfs_set_header_generation(cow, trans->transid);
275 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
276 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
277 BTRFS_HEADER_FLAG_RELOC);
278 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
279 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
281 btrfs_set_header_owner(cow, new_root_objectid);
283 write_extent_buffer(cow, root->fs_info->fsid,
284 (unsigned long)btrfs_header_fsid(cow),
287 WARN_ON(btrfs_header_generation(buf) > trans->transid);
288 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
289 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
291 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
296 btrfs_mark_buffer_dirty(cow);
305 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
306 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
308 MOD_LOG_ROOT_REPLACE,
311 struct tree_mod_move {
316 struct tree_mod_root {
321 struct tree_mod_elem {
323 u64 index; /* shifted logical */
327 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
330 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
333 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
334 struct btrfs_disk_key key;
337 /* this is used for op == MOD_LOG_MOVE_KEYS */
338 struct tree_mod_move move;
340 /* this is used for op == MOD_LOG_ROOT_REPLACE */
341 struct tree_mod_root old_root;
344 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
346 read_lock(&fs_info->tree_mod_log_lock);
349 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
351 read_unlock(&fs_info->tree_mod_log_lock);
354 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
356 write_lock(&fs_info->tree_mod_log_lock);
359 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
361 write_unlock(&fs_info->tree_mod_log_lock);
365 * This adds a new blocker to the tree mod log's blocker list if the @elem
366 * passed does not already have a sequence number set. So when a caller expects
367 * to record tree modifications, it should ensure that elem->seq is set to zero
368 * before calling btrfs_get_tree_mod_seq.
369 * Returns a fresh, unused tree log modification sequence number, even if no new
372 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
373 struct seq_list *elem)
377 tree_mod_log_write_lock(fs_info);
378 spin_lock(&fs_info->tree_mod_seq_lock);
380 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
381 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
383 seq = btrfs_inc_tree_mod_seq(fs_info);
384 spin_unlock(&fs_info->tree_mod_seq_lock);
385 tree_mod_log_write_unlock(fs_info);
390 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
391 struct seq_list *elem)
393 struct rb_root *tm_root;
394 struct rb_node *node;
395 struct rb_node *next;
396 struct seq_list *cur_elem;
397 struct tree_mod_elem *tm;
398 u64 min_seq = (u64)-1;
399 u64 seq_putting = elem->seq;
404 spin_lock(&fs_info->tree_mod_seq_lock);
405 list_del(&elem->list);
408 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
409 if (cur_elem->seq < min_seq) {
410 if (seq_putting > cur_elem->seq) {
412 * blocker with lower sequence number exists, we
413 * cannot remove anything from the log
415 spin_unlock(&fs_info->tree_mod_seq_lock);
418 min_seq = cur_elem->seq;
421 spin_unlock(&fs_info->tree_mod_seq_lock);
424 * anything that's lower than the lowest existing (read: blocked)
425 * sequence number can be removed from the tree.
427 tree_mod_log_write_lock(fs_info);
428 tm_root = &fs_info->tree_mod_log;
429 for (node = rb_first(tm_root); node; node = next) {
430 next = rb_next(node);
431 tm = container_of(node, struct tree_mod_elem, node);
432 if (tm->seq > min_seq)
434 rb_erase(node, tm_root);
437 tree_mod_log_write_unlock(fs_info);
441 * key order of the log:
444 * the index is the shifted logical of the *new* root node for root replace
445 * operations, or the shifted logical of the affected block for all other
449 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
451 struct rb_root *tm_root;
452 struct rb_node **new;
453 struct rb_node *parent = NULL;
454 struct tree_mod_elem *cur;
456 BUG_ON(!tm || !tm->seq);
458 tm_root = &fs_info->tree_mod_log;
459 new = &tm_root->rb_node;
461 cur = container_of(*new, struct tree_mod_elem, node);
463 if (cur->index < tm->index)
464 new = &((*new)->rb_left);
465 else if (cur->index > tm->index)
466 new = &((*new)->rb_right);
467 else if (cur->seq < tm->seq)
468 new = &((*new)->rb_left);
469 else if (cur->seq > tm->seq)
470 new = &((*new)->rb_right);
477 rb_link_node(&tm->node, parent, new);
478 rb_insert_color(&tm->node, tm_root);
483 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
484 * returns zero with the tree_mod_log_lock acquired. The caller must hold
485 * this until all tree mod log insertions are recorded in the rb tree and then
486 * call tree_mod_log_write_unlock() to release.
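 *
 * A typical caller (mirroring the insert helpers further below) looks
 * roughly like:
 *
 *	if (tree_mod_dont_log(fs_info, eb))
 *		return 0;
 *	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
 *	tree_mod_log_write_unlock(fs_info);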
488 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
489 struct extent_buffer *eb) {
491 if (list_empty(&(fs_info)->tree_mod_seq_list))
493 if (eb && btrfs_header_level(eb) == 0)
496 tree_mod_log_write_lock(fs_info);
497 if (list_empty(&fs_info->tree_mod_seq_list)) {
499 * someone emptied the list while we were waiting for the lock.
500 * we must not add to the list when no blocker exists.
502 tree_mod_log_write_unlock(fs_info);
510 * This allocates memory and gets a tree modification sequence number.
512 * Returns <0 on error.
513 * Returns >0 (the added sequence number) on success.
515 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
516 struct tree_mod_elem **tm_ret)
518 struct tree_mod_elem *tm;
521 * once we switch from spin locks to something different, we should
522 * honor the flags parameter here.
524 tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
528 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
533 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
534 struct extent_buffer *eb, int slot,
535 enum mod_log_op op, gfp_t flags)
538 struct tree_mod_elem *tm;
540 ret = tree_mod_alloc(fs_info, flags, &tm);
544 tm->index = eb->start >> PAGE_CACHE_SHIFT;
545 if (op != MOD_LOG_KEY_ADD) {
546 btrfs_node_key(eb, &tm->key, slot);
547 tm->blockptr = btrfs_node_blockptr(eb, slot);
551 tm->generation = btrfs_node_ptr_generation(eb, slot);
553 return __tree_mod_log_insert(fs_info, tm);
557 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
558 struct extent_buffer *eb, int slot,
559 enum mod_log_op op, gfp_t flags)
563 if (tree_mod_dont_log(fs_info, eb))
566 ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
568 tree_mod_log_write_unlock(fs_info);
573 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
574 int slot, enum mod_log_op op)
576 return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
580 tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
581 struct extent_buffer *eb, int slot,
584 return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
588 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
589 struct extent_buffer *eb, int dst_slot, int src_slot,
590 int nr_items, gfp_t flags)
592 struct tree_mod_elem *tm;
596 if (tree_mod_dont_log(fs_info, eb))
600 * When we overwrite something during the move, we log these removals.
601 * This can only happen when we move towards the beginning of the
602 * buffer, i.e. dst_slot < src_slot.
604 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
605 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
606 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
610 ret = tree_mod_alloc(fs_info, flags, &tm);
614 tm->index = eb->start >> PAGE_CACHE_SHIFT;
616 tm->move.dst_slot = dst_slot;
617 tm->move.nr_items = nr_items;
618 tm->op = MOD_LOG_MOVE_KEYS;
620 ret = __tree_mod_log_insert(fs_info, tm);
622 tree_mod_log_write_unlock(fs_info);
627 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
633 if (btrfs_header_level(eb) == 0)
636 nritems = btrfs_header_nritems(eb);
637 for (i = nritems - 1; i >= 0; i--) {
638 ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
639 MOD_LOG_KEY_REMOVE_WHILE_FREEING);
645 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
646 struct extent_buffer *old_root,
647 struct extent_buffer *new_root, gfp_t flags)
649 struct tree_mod_elem *tm;
652 if (tree_mod_dont_log(fs_info, NULL))
655 ret = tree_mod_alloc(fs_info, flags, &tm);
659 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
660 tm->old_root.logical = old_root->start;
661 tm->old_root.level = btrfs_header_level(old_root);
662 tm->generation = btrfs_header_generation(old_root);
663 tm->op = MOD_LOG_ROOT_REPLACE;
665 ret = __tree_mod_log_insert(fs_info, tm);
667 tree_mod_log_write_unlock(fs_info);
671 static struct tree_mod_elem *
672 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
675 struct rb_root *tm_root;
676 struct rb_node *node;
677 struct tree_mod_elem *cur = NULL;
678 struct tree_mod_elem *found = NULL;
679 u64 index = start >> PAGE_CACHE_SHIFT;
681 tree_mod_log_read_lock(fs_info);
682 tm_root = &fs_info->tree_mod_log;
683 node = tm_root->rb_node;
685 cur = container_of(node, struct tree_mod_elem, node);
686 if (cur->index < index) {
687 node = node->rb_left;
688 } else if (cur->index > index) {
689 node = node->rb_right;
690 } else if (cur->seq < min_seq) {
691 node = node->rb_left;
692 } else if (!smallest) {
693 /* we want the node with the highest seq */
695 BUG_ON(found->seq > cur->seq);
697 node = node->rb_left;
698 } else if (cur->seq > min_seq) {
699 /* we want the node with the smallest seq */
701 BUG_ON(found->seq < cur->seq);
703 node = node->rb_right;
709 tree_mod_log_read_unlock(fs_info);
715 * this returns the element from the log with the smallest time sequence
716 * value that's in the log (the oldest log item). any element with a time
717 * sequence lower than min_seq will be ignored.
719 static struct tree_mod_elem *
720 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
723 return __tree_mod_log_search(fs_info, start, min_seq, 1);
727 * this returns the element from the log with the largest time sequence
728 * value that's in the log (the most recent log item). any element with
729 * a time sequence lower than min_seq will be ignored.
731 static struct tree_mod_elem *
732 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
734 return __tree_mod_log_search(fs_info, start, min_seq, 0);
738 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
739 struct extent_buffer *src, unsigned long dst_offset,
740 unsigned long src_offset, int nr_items)
745 if (tree_mod_dont_log(fs_info, NULL))
748 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
749 tree_mod_log_write_unlock(fs_info);
753 for (i = 0; i < nr_items; i++) {
754 ret = tree_mod_log_insert_key_locked(fs_info, src,
758 ret = tree_mod_log_insert_key_locked(fs_info, dst,
764 tree_mod_log_write_unlock(fs_info);
768 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
769 int dst_offset, int src_offset, int nr_items)
772 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
778 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
779 struct extent_buffer *eb,
780 struct btrfs_disk_key *disk_key, int slot, int atomic)
784 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
786 atomic ? GFP_ATOMIC : GFP_NOFS);
791 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
793 if (tree_mod_dont_log(fs_info, eb))
796 __tree_mod_log_free_eb(fs_info, eb);
798 tree_mod_log_write_unlock(fs_info);
802 tree_mod_log_set_root_pointer(struct btrfs_root *root,
803 struct extent_buffer *new_root_node)
806 ret = tree_mod_log_insert_root(root->fs_info, root->node,
807 new_root_node, GFP_NOFS);
812 * check if the tree block can be shared by multiple trees
814 int btrfs_block_can_be_shared(struct btrfs_root *root,
815 struct extent_buffer *buf)
818 * Tree blocks not in reference counted trees and tree roots
819 * are never shared. If a block was allocated after the last
820 * snapshot and the block was not allocated by tree relocation,
821 * we know the block is not shared.
823 if (root->ref_cows &&
824 buf != root->node && buf != root->commit_root &&
825 (btrfs_header_generation(buf) <=
826 btrfs_root_last_snapshot(&root->root_item) ||
827 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
829 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
830 if (root->ref_cows &&
831 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
837 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
838 struct btrfs_root *root,
839 struct extent_buffer *buf,
840 struct extent_buffer *cow,
850 * Backrefs update rules:
852 * Always use full backrefs for extent pointers in tree block
853 * allocated by tree relocation.
855 * If a shared tree block is no longer referenced by its owner
856 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
857 * use full backrefs for extent pointers in tree block.
859 * If a tree block is being relocated
860 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
861 * use full backrefs for extent pointers in tree block.
862 * The reason for this is some operations (such as drop tree)
863 * are only allowed for blocks that use full backrefs.
866 if (btrfs_block_can_be_shared(root, buf)) {
867 ret = btrfs_lookup_extent_info(trans, root, buf->start,
868 buf->len, &refs, &flags);
873 btrfs_std_error(root->fs_info, ret);
878 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
879 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
880 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
885 owner = btrfs_header_owner(buf);
886 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
887 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
890 if ((owner == root->root_key.objectid ||
891 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
892 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
893 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
894 BUG_ON(ret); /* -ENOMEM */
896 if (root->root_key.objectid ==
897 BTRFS_TREE_RELOC_OBJECTID) {
898 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
899 BUG_ON(ret); /* -ENOMEM */
900 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
901 BUG_ON(ret); /* -ENOMEM */
903 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
906 if (root->root_key.objectid ==
907 BTRFS_TREE_RELOC_OBJECTID)
908 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
910 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
911 BUG_ON(ret); /* -ENOMEM */
913 if (new_flags != 0) {
914 ret = btrfs_set_disk_extent_flags(trans, root,
922 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
923 if (root->root_key.objectid ==
924 BTRFS_TREE_RELOC_OBJECTID)
925 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
927 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
928 BUG_ON(ret); /* -ENOMEM */
929 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
930 BUG_ON(ret); /* -ENOMEM */
932 tree_mod_log_free_eb(root->fs_info, buf);
933 clean_tree_block(trans, root, buf);
940 * does the dirty work in cow of a single block. The parent block (if
941 * supplied) is updated to point to the new cow copy. The new buffer is marked
942 * dirty and returned locked. If you modify the block it needs to be marked
945 * search_start -- an allocation hint for the new block
947 * empty_size -- a hint that you plan on doing more cow. This is the size in
948 * bytes the allocator should try to find free next to the block it returns.
949 * This is just a hint and may be ignored by the allocator.
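 *
 * buf must be locked by the caller; on success the old buffer is released
 * and *cow_ret holds the new buffer, locked and marked dirty.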
951 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
952 struct btrfs_root *root,
953 struct extent_buffer *buf,
954 struct extent_buffer *parent, int parent_slot,
955 struct extent_buffer **cow_ret,
956 u64 search_start, u64 empty_size)
958 struct btrfs_disk_key disk_key;
959 struct extent_buffer *cow;
968 btrfs_assert_tree_locked(buf);
970 WARN_ON(root->ref_cows && trans->transid !=
971 root->fs_info->running_transaction->transid);
972 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
974 level = btrfs_header_level(buf);
977 btrfs_item_key(buf, &disk_key, 0);
979 btrfs_node_key(buf, &disk_key, 0);
981 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
983 parent_start = parent->start;
989 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
990 root->root_key.objectid, &disk_key,
991 level, search_start, empty_size);
995 /* cow is set to blocking by btrfs_init_new_buffer */
997 copy_extent_buffer(cow, buf, 0, 0, cow->len);
998 btrfs_set_header_bytenr(cow, cow->start);
999 btrfs_set_header_generation(cow, trans->transid);
1000 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1001 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1002 BTRFS_HEADER_FLAG_RELOC);
1003 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1004 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1006 btrfs_set_header_owner(cow, root->root_key.objectid);
1008 write_extent_buffer(cow, root->fs_info->fsid,
1009 (unsigned long)btrfs_header_fsid(cow),
1012 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1014 btrfs_abort_transaction(trans, root, ret);
1019 btrfs_reloc_cow_block(trans, root, buf, cow);
1021 if (buf == root->node) {
1022 WARN_ON(parent && parent != buf);
1023 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1024 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1025 parent_start = buf->start;
1029 extent_buffer_get(cow);
1030 tree_mod_log_set_root_pointer(root, cow);
1031 rcu_assign_pointer(root->node, cow);
1033 btrfs_free_tree_block(trans, root, buf, parent_start,
1035 free_extent_buffer(buf);
1036 add_root_to_dirty_list(root);
1038 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1039 parent_start = parent->start;
1043 WARN_ON(trans->transid != btrfs_header_generation(parent));
1044 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1045 MOD_LOG_KEY_REPLACE);
1046 btrfs_set_node_blockptr(parent, parent_slot,
1048 btrfs_set_node_ptr_generation(parent, parent_slot,
1050 btrfs_mark_buffer_dirty(parent);
1051 btrfs_free_tree_block(trans, root, buf, parent_start,
1055 btrfs_tree_unlock(buf);
1056 free_extent_buffer_stale(buf);
1057 btrfs_mark_buffer_dirty(cow);
1063 * returns the logical address of the oldest predecessor of the given root.
1064 * entries older than time_seq are ignored.
1066 static struct tree_mod_elem *
1067 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1068 struct btrfs_root *root, u64 time_seq)
1070 struct tree_mod_elem *tm;
1071 struct tree_mod_elem *found = NULL;
1072 u64 root_logical = root->node->start;
1079 * the very last operation that's logged for a root is the replacement
1080 * operation (if it is replaced at all). this has the index of the *new*
1081 * root, making it the very first operation that's logged for this root.
1084 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1089 * if there are no tree operations for the oldest root, we simply
1090 * return it. this should only happen if that (old) root is at
1097 * if there's an operation that's not a root replacement, we
1098 * found the oldest version of our root. normally, we'll find a
1099 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1101 if (tm->op != MOD_LOG_ROOT_REPLACE)
1105 root_logical = tm->old_root.logical;
1106 BUG_ON(root_logical == root->node->start);
1110 /* if there's no old root to return, return what we found instead */
1118 * tm is a pointer to the first operation to rewind within eb. then, all
1119 * previous operations will be rewound (until we reach something older than
1123 __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
1124 struct tree_mod_elem *first_tm)
1127 struct rb_node *next;
1128 struct tree_mod_elem *tm = first_tm;
1129 unsigned long o_dst;
1130 unsigned long o_src;
1131 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1133 n = btrfs_header_nritems(eb);
1134 while (tm && tm->seq >= time_seq) {
1136 * all the operations are recorded with the operator used for
1137 * the modification. as we're going backwards, we do the
1138 * opposite of each operation here.
1141 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1142 BUG_ON(tm->slot < n);
1143 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1144 case MOD_LOG_KEY_REMOVE:
1145 btrfs_set_node_key(eb, &tm->key, tm->slot);
1146 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1147 btrfs_set_node_ptr_generation(eb, tm->slot,
1151 case MOD_LOG_KEY_REPLACE:
1152 BUG_ON(tm->slot >= n);
1153 btrfs_set_node_key(eb, &tm->key, tm->slot);
1154 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1155 btrfs_set_node_ptr_generation(eb, tm->slot,
1158 case MOD_LOG_KEY_ADD:
1159 /* if a move operation is needed it's in the log */
1162 case MOD_LOG_MOVE_KEYS:
1163 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1164 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1165 memmove_extent_buffer(eb, o_dst, o_src,
1166 tm->move.nr_items * p_size);
1168 case MOD_LOG_ROOT_REPLACE:
1170 * this operation is special. for roots, this must be
1171 * handled explicitly before rewinding.
1172 * for non-roots, this operation may exist if the node
1173 * was a root: root A -> child B; then A becomes empty and
1174 * B is promoted to the new root. in the mod log, we'll
1175 * have a root-replace operation for B, a tree block
1176 * that is not a root. we simply ignore that operation.
1180 next = rb_next(&tm->node);
1183 tm = container_of(next, struct tree_mod_elem, node);
1184 if (tm->index != first_tm->index)
1187 btrfs_set_header_nritems(eb, n);
1190 static struct extent_buffer *
1191 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1194 struct extent_buffer *eb_rewin;
1195 struct tree_mod_elem *tm;
1200 if (btrfs_header_level(eb) == 0)
1203 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1207 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1208 BUG_ON(tm->slot != 0);
1209 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1210 fs_info->tree_root->nodesize);
1212 btrfs_set_header_bytenr(eb_rewin, eb->start);
1213 btrfs_set_header_backref_rev(eb_rewin,
1214 btrfs_header_backref_rev(eb));
1215 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1216 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1218 eb_rewin = btrfs_clone_extent_buffer(eb);
1222 extent_buffer_get(eb_rewin);
1223 free_extent_buffer(eb);
1225 __tree_mod_log_rewind(eb_rewin, time_seq, tm);
1226 WARN_ON(btrfs_header_nritems(eb_rewin) >
1227 BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
1233 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1234 * value. If there are no changes, the current root->node is returned. If
1235 * anything changed in between, there's a fresh buffer allocated on which the
1236 * rewind operations are done. In any case, the returned buffer is read locked.
1237 * Returns NULL on error (with no locks held).
1239 static inline struct extent_buffer *
1240 get_old_root(struct btrfs_root *root, u64 time_seq)
1242 struct tree_mod_elem *tm;
1243 struct extent_buffer *eb;
1244 struct tree_mod_root *old_root = NULL;
1245 u64 old_generation = 0;
1249 eb = btrfs_read_lock_root_node(root);
1250 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1254 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1255 old_root = &tm->old_root;
1256 old_generation = tm->generation;
1257 logical = old_root->logical;
1259 logical = root->node->start;
1262 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1263 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1264 btrfs_tree_read_unlock(root->node);
1265 free_extent_buffer(root->node);
1266 blocksize = btrfs_level_size(root, old_root->level);
1267 eb = read_tree_block(root, logical, blocksize, 0);
1269 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1273 eb = btrfs_clone_extent_buffer(eb);
1275 } else if (old_root) {
1276 btrfs_tree_read_unlock(root->node);
1277 free_extent_buffer(root->node);
1278 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1280 eb = btrfs_clone_extent_buffer(root->node);
1281 btrfs_tree_read_unlock(root->node);
1282 free_extent_buffer(root->node);
1287 extent_buffer_get(eb);
1288 btrfs_tree_read_lock(eb);
1290 btrfs_set_header_bytenr(eb, eb->start);
1291 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1292 btrfs_set_header_owner(eb, root->root_key.objectid);
1293 btrfs_set_header_level(eb, old_root->level);
1294 btrfs_set_header_generation(eb, old_generation);
1297 __tree_mod_log_rewind(eb, time_seq, tm);
1299 WARN_ON(btrfs_header_level(eb) != 0);
1300 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1305 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1307 struct tree_mod_elem *tm;
1310 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1311 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1312 level = tm->old_root.level;
1315 level = btrfs_header_level(root->node);
1322 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1323 struct btrfs_root *root,
1324 struct extent_buffer *buf)
1326 /* ensure we can see the force_cow */
1330 * We do not need to cow a block if
1331 * 1) this block is not created or changed in this transaction;
1332 * 2) this block does not belong to TREE_RELOC tree;
1333 * 3) the root is not forced COW.
1335 * What is forced COW:
1336 * when we create a snapshot while committing the transaction,
1337 * after we've finished copying the src root, we must COW the shared
1338 * block to ensure the metadata consistency.
1340 if (btrfs_header_generation(buf) == trans->transid &&
1341 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1342 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1343 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1350 * cows a single block, see __btrfs_cow_block for the real work.
1351 * This version of it has extra checks so that a block isn't cow'd more than
1352 * once per transaction, as long as it hasn't been written yet
1354 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1355 struct btrfs_root *root, struct extent_buffer *buf,
1356 struct extent_buffer *parent, int parent_slot,
1357 struct extent_buffer **cow_ret)
1362 if (trans->transaction != root->fs_info->running_transaction) {
1363 printk(KERN_CRIT "trans %llu running %llu\n",
1364 (unsigned long long)trans->transid,
1365 (unsigned long long)
1366 root->fs_info->running_transaction->transid);
1369 if (trans->transid != root->fs_info->generation) {
1370 printk(KERN_CRIT "trans %llu running %llu\n",
1371 (unsigned long long)trans->transid,
1372 (unsigned long long)root->fs_info->generation);
1376 if (!should_cow_block(trans, root, buf)) {
1381 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1384 btrfs_set_lock_blocking(parent);
1385 btrfs_set_lock_blocking(buf);
1387 ret = __btrfs_cow_block(trans, root, buf, parent,
1388 parent_slot, cow_ret, search_start, 0);
1390 trace_btrfs_cow_block(root, buf, *cow_ret);
1396 * helper function for defrag to decide if two blocks pointed to by a
1397 * node are actually close by
1399 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1401 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1403 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1409 * compare two keys in a memcmp fashion
1411 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1413 struct btrfs_key k1;
1415 btrfs_disk_key_to_cpu(&k1, disk);
1417 return btrfs_comp_cpu_keys(&k1, k2);
1421 * same as comp_keys only with two btrfs_key's
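 * The comparison order is objectid, then type, then offset: returns 1 if
 * k1 is greater, -1 if k1 is smaller and 0 if the two keys are equal.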
1423 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1425 if (k1->objectid > k2->objectid)
1427 if (k1->objectid < k2->objectid)
1429 if (k1->type > k2->type)
1431 if (k1->type < k2->type)
1433 if (k1->offset > k2->offset)
1435 if (k1->offset < k2->offset)
1441 * this is used by the defrag code to go through all the
1442 * leaves pointed to by a node and reallocate them so that
1443 * disk order is close to key order
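 * @progress lets a caller resume an earlier pass: slots whose keys compare
 * below @progress are skipped. *last_ret carries the allocation search hint
 * from one call to the next.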
1445 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1446 struct btrfs_root *root, struct extent_buffer *parent,
1447 int start_slot, int cache_only, u64 *last_ret,
1448 struct btrfs_key *progress)
1450 struct extent_buffer *cur;
1453 u64 search_start = *last_ret;
1463 int progress_passed = 0;
1464 struct btrfs_disk_key disk_key;
1466 parent_level = btrfs_header_level(parent);
1467 if (cache_only && parent_level != 1)
1470 if (trans->transaction != root->fs_info->running_transaction)
1472 if (trans->transid != root->fs_info->generation)
1475 parent_nritems = btrfs_header_nritems(parent);
1476 blocksize = btrfs_level_size(root, parent_level - 1);
1477 end_slot = parent_nritems;
1479 if (parent_nritems == 1)
1482 btrfs_set_lock_blocking(parent);
1484 for (i = start_slot; i < end_slot; i++) {
1487 btrfs_node_key(parent, &disk_key, i);
1488 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1491 progress_passed = 1;
1492 blocknr = btrfs_node_blockptr(parent, i);
1493 gen = btrfs_node_ptr_generation(parent, i);
1494 if (last_block == 0)
1495 last_block = blocknr;
1498 other = btrfs_node_blockptr(parent, i - 1);
1499 close = close_blocks(blocknr, other, blocksize);
1501 if (!close && i < end_slot - 2) {
1502 other = btrfs_node_blockptr(parent, i + 1);
1503 close = close_blocks(blocknr, other, blocksize);
1506 last_block = blocknr;
1510 cur = btrfs_find_tree_block(root, blocknr, blocksize);
1512 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1515 if (!cur || !uptodate) {
1517 free_extent_buffer(cur);
1521 cur = read_tree_block(root, blocknr,
1525 } else if (!uptodate) {
1526 err = btrfs_read_buffer(cur, gen);
1528 free_extent_buffer(cur);
1533 if (search_start == 0)
1534 search_start = last_block;
1536 btrfs_tree_lock(cur);
1537 btrfs_set_lock_blocking(cur);
1538 err = __btrfs_cow_block(trans, root, cur, parent, i,
1541 (end_slot - i) * blocksize));
1543 btrfs_tree_unlock(cur);
1544 free_extent_buffer(cur);
1547 search_start = cur->start;
1548 last_block = cur->start;
1549 *last_ret = search_start;
1550 btrfs_tree_unlock(cur);
1551 free_extent_buffer(cur);
1557 * The leaf data grows from end-to-front in the node.
1558 * this returns the address of the start of the last item,
1559 * which is the stop of the leaf data stack
1561 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1562 struct extent_buffer *leaf)
1564 u32 nr = btrfs_header_nritems(leaf);
1566 return BTRFS_LEAF_DATA_SIZE(root);
1567 return btrfs_item_offset_nr(leaf, nr - 1);
1572 * search for key in the extent_buffer. The items start at offset p,
1573 * and they are item_size apart. There are 'max' items in p.
1575 * the slot in the array is returned via slot, and it points to
1576 * the place where you would insert key if it is not found in
1579 * slot may point to max if the key is bigger than all of the keys
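 *
 * returns 0 if the key was found (*slot is its index) and 1 if it was not
 * found (*slot is where it would be inserted).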
1581 static noinline int generic_bin_search(struct extent_buffer *eb,
1583 int item_size, struct btrfs_key *key,
1590 struct btrfs_disk_key *tmp = NULL;
1591 struct btrfs_disk_key unaligned;
1592 unsigned long offset;
1594 unsigned long map_start = 0;
1595 unsigned long map_len = 0;
1598 while (low < high) {
1599 mid = (low + high) / 2;
1600 offset = p + mid * item_size;
1602 if (!kaddr || offset < map_start ||
1603 (offset + sizeof(struct btrfs_disk_key)) >
1604 map_start + map_len) {
1606 err = map_private_extent_buffer(eb, offset,
1607 sizeof(struct btrfs_disk_key),
1608 &kaddr, &map_start, &map_len);
1611 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1614 read_extent_buffer(eb, &unaligned,
1615 offset, sizeof(unaligned));
1620 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1623 ret = comp_keys(tmp, key);
1639 * simple bin_search frontend that does the right thing for
1642 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1643 int level, int *slot)
1646 return generic_bin_search(eb,
1647 offsetof(struct btrfs_leaf, items),
1648 sizeof(struct btrfs_item),
1649 key, btrfs_header_nritems(eb),
1652 return generic_bin_search(eb,
1653 offsetof(struct btrfs_node, ptrs),
1654 sizeof(struct btrfs_key_ptr),
1655 key, btrfs_header_nritems(eb),
1659 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1660 int level, int *slot)
1662 return bin_search(eb, key, level, slot);
1665 static void root_add_used(struct btrfs_root *root, u32 size)
1667 spin_lock(&root->accounting_lock);
1668 btrfs_set_root_used(&root->root_item,
1669 btrfs_root_used(&root->root_item) + size);
1670 spin_unlock(&root->accounting_lock);
1673 static void root_sub_used(struct btrfs_root *root, u32 size)
1675 spin_lock(&root->accounting_lock);
1676 btrfs_set_root_used(&root->root_item,
1677 btrfs_root_used(&root->root_item) - size);
1678 spin_unlock(&root->accounting_lock);
1681 /* given a node and slot number, this reads the block it points to. The
1682 * extent buffer is returned with a reference taken (but unlocked).
1683 * NULL is returned on error.
1685 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1686 struct extent_buffer *parent, int slot)
1688 int level = btrfs_header_level(parent);
1691 if (slot >= btrfs_header_nritems(parent))
1696 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
1697 btrfs_level_size(root, level - 1),
1698 btrfs_node_ptr_generation(parent, slot));
1702 * node level balancing, used to make sure nodes are in proper order for
1703 * item deletion. We balance from the top down, so we have to make sure
1704 * that a deletion won't leave a node completely empty later on.
1706 static noinline int balance_level(struct btrfs_trans_handle *trans,
1707 struct btrfs_root *root,
1708 struct btrfs_path *path, int level)
1710 struct extent_buffer *right = NULL;
1711 struct extent_buffer *mid;
1712 struct extent_buffer *left = NULL;
1713 struct extent_buffer *parent = NULL;
1717 int orig_slot = path->slots[level];
1723 mid = path->nodes[level];
1725 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1726 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1727 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1729 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1731 if (level < BTRFS_MAX_LEVEL - 1) {
1732 parent = path->nodes[level + 1];
1733 pslot = path->slots[level + 1];
1737 * deal with the case where there is only one pointer in the root
1738 * by promoting the node below to a root
1741 struct extent_buffer *child;
1743 if (btrfs_header_nritems(mid) != 1)
1746 /* promote the child to a root */
1747 child = read_node_slot(root, mid, 0);
1750 btrfs_std_error(root->fs_info, ret);
1754 btrfs_tree_lock(child);
1755 btrfs_set_lock_blocking(child);
1756 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1758 btrfs_tree_unlock(child);
1759 free_extent_buffer(child);
1763 tree_mod_log_free_eb(root->fs_info, root->node);
1764 tree_mod_log_set_root_pointer(root, child);
1765 rcu_assign_pointer(root->node, child);
1767 add_root_to_dirty_list(root);
1768 btrfs_tree_unlock(child);
1770 path->locks[level] = 0;
1771 path->nodes[level] = NULL;
1772 clean_tree_block(trans, root, mid);
1773 btrfs_tree_unlock(mid);
1774 /* once for the path */
1775 free_extent_buffer(mid);
1777 root_sub_used(root, mid->len);
1778 btrfs_free_tree_block(trans, root, mid, 0, 1);
1779 /* once for the root ptr */
1780 free_extent_buffer_stale(mid);
1783 if (btrfs_header_nritems(mid) >
1784 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1787 left = read_node_slot(root, parent, pslot - 1);
1789 btrfs_tree_lock(left);
1790 btrfs_set_lock_blocking(left);
1791 wret = btrfs_cow_block(trans, root, left,
1792 parent, pslot - 1, &left);
1798 right = read_node_slot(root, parent, pslot + 1);
1800 btrfs_tree_lock(right);
1801 btrfs_set_lock_blocking(right);
1802 wret = btrfs_cow_block(trans, root, right,
1803 parent, pslot + 1, &right);
1810 /* first, try to make some room in the middle buffer */
1812 orig_slot += btrfs_header_nritems(left);
1813 wret = push_node_left(trans, root, left, mid, 1);
1819 * then try to empty the right most buffer into the middle
1822 wret = push_node_left(trans, root, mid, right, 1);
1823 if (wret < 0 && wret != -ENOSPC)
1825 if (btrfs_header_nritems(right) == 0) {
1826 clean_tree_block(trans, root, right);
1827 btrfs_tree_unlock(right);
1828 del_ptr(trans, root, path, level + 1, pslot + 1, 1);
1829 root_sub_used(root, right->len);
1830 btrfs_free_tree_block(trans, root, right, 0, 1);
1831 free_extent_buffer_stale(right);
1834 struct btrfs_disk_key right_key;
1835 btrfs_node_key(right, &right_key, 0);
1836 tree_mod_log_set_node_key(root->fs_info, parent,
1837 &right_key, pslot + 1, 0);
1838 btrfs_set_node_key(parent, &right_key, pslot + 1);
1839 btrfs_mark_buffer_dirty(parent);
1842 if (btrfs_header_nritems(mid) == 1) {
1844 * we're not allowed to leave a node with one item in the
1845 * tree during a delete. A deletion from lower in the tree
1846 * could try to delete the only pointer in this node.
1847 * So, pull some keys from the left.
1848 * There has to be a left pointer at this point because
1849 * otherwise we would have pulled some pointers from the
1854 btrfs_std_error(root->fs_info, ret);
1857 wret = balance_node_right(trans, root, mid, left);
1863 wret = push_node_left(trans, root, left, mid, 1);
1869 if (btrfs_header_nritems(mid) == 0) {
1870 clean_tree_block(trans, root, mid);
1871 btrfs_tree_unlock(mid);
1872 del_ptr(trans, root, path, level + 1, pslot, 1);
1873 root_sub_used(root, mid->len);
1874 btrfs_free_tree_block(trans, root, mid, 0, 1);
1875 free_extent_buffer_stale(mid);
1878 /* update the parent key to reflect our changes */
1879 struct btrfs_disk_key mid_key;
1880 btrfs_node_key(mid, &mid_key, 0);
1881 tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
1883 btrfs_set_node_key(parent, &mid_key, pslot);
1884 btrfs_mark_buffer_dirty(parent);
1887 /* update the path */
1889 if (btrfs_header_nritems(left) > orig_slot) {
1890 extent_buffer_get(left);
1891 /* left was locked after cow */
1892 path->nodes[level] = left;
1893 path->slots[level + 1] -= 1;
1894 path->slots[level] = orig_slot;
1896 btrfs_tree_unlock(mid);
1897 free_extent_buffer(mid);
1900 orig_slot -= btrfs_header_nritems(left);
1901 path->slots[level] = orig_slot;
1904 /* double check we haven't messed things up */
1906 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1910 btrfs_tree_unlock(right);
1911 free_extent_buffer(right);
1914 if (path->nodes[level] != left)
1915 btrfs_tree_unlock(left);
1916 free_extent_buffer(left);
1921 /* Node balancing for insertion. Here we only split or push nodes around
1922 * when they are completely full. This is also done top down, so we
1923 * have to be pessimistic.
1925 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1926 struct btrfs_root *root,
1927 struct btrfs_path *path, int level)
1929 struct extent_buffer *right = NULL;
1930 struct extent_buffer *mid;
1931 struct extent_buffer *left = NULL;
1932 struct extent_buffer *parent = NULL;
1936 int orig_slot = path->slots[level];
1941 mid = path->nodes[level];
1942 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1944 if (level < BTRFS_MAX_LEVEL - 1) {
1945 parent = path->nodes[level + 1];
1946 pslot = path->slots[level + 1];
1952 left = read_node_slot(root, parent, pslot - 1);
1954 /* first, try to make some room in the middle buffer */
1958 btrfs_tree_lock(left);
1959 btrfs_set_lock_blocking(left);
1961 left_nr = btrfs_header_nritems(left);
1962 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1965 ret = btrfs_cow_block(trans, root, left, parent,
1970 wret = push_node_left(trans, root,
1977 struct btrfs_disk_key disk_key;
1978 orig_slot += left_nr;
1979 btrfs_node_key(mid, &disk_key, 0);
1980 tree_mod_log_set_node_key(root->fs_info, parent,
1981 &disk_key, pslot, 0);
1982 btrfs_set_node_key(parent, &disk_key, pslot);
1983 btrfs_mark_buffer_dirty(parent);
1984 if (btrfs_header_nritems(left) > orig_slot) {
1985 path->nodes[level] = left;
1986 path->slots[level + 1] -= 1;
1987 path->slots[level] = orig_slot;
1988 btrfs_tree_unlock(mid);
1989 free_extent_buffer(mid);
1992 btrfs_header_nritems(left);
1993 path->slots[level] = orig_slot;
1994 btrfs_tree_unlock(left);
1995 free_extent_buffer(left);
1999 btrfs_tree_unlock(left);
2000 free_extent_buffer(left);
2002 right = read_node_slot(root, parent, pslot + 1);
2005 * then try to empty the right most buffer into the middle
2010 btrfs_tree_lock(right);
2011 btrfs_set_lock_blocking(right);
2013 right_nr = btrfs_header_nritems(right);
2014 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2017 ret = btrfs_cow_block(trans, root, right,
2023 wret = balance_node_right(trans, root,
2030 struct btrfs_disk_key disk_key;
2032 btrfs_node_key(right, &disk_key, 0);
2033 tree_mod_log_set_node_key(root->fs_info, parent,
2034 &disk_key, pslot + 1, 0);
2035 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2036 btrfs_mark_buffer_dirty(parent);
2038 if (btrfs_header_nritems(mid) <= orig_slot) {
2039 path->nodes[level] = right;
2040 path->slots[level + 1] += 1;
2041 path->slots[level] = orig_slot -
2042 btrfs_header_nritems(mid);
2043 btrfs_tree_unlock(mid);
2044 free_extent_buffer(mid);
2046 btrfs_tree_unlock(right);
2047 free_extent_buffer(right);
2051 btrfs_tree_unlock(right);
2052 free_extent_buffer(right);
2058 * readahead one full node of leaves, finding things that are close
2059 * to the block in 'slot', and triggering ra on them.
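 * path->reada selects the direction: negative values walk slots backwards
 * from 'slot', positive values walk forwards. Readahead is only issued for
 * blocks within 64k of the target, and the scan stops after roughly 64k of
 * reads or 32 slots examined.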
2061 static void reada_for_search(struct btrfs_root *root,
2062 struct btrfs_path *path,
2063 int level, int slot, u64 objectid)
2065 struct extent_buffer *node;
2066 struct btrfs_disk_key disk_key;
2072 int direction = path->reada;
2073 struct extent_buffer *eb;
2081 if (!path->nodes[level])
2084 node = path->nodes[level];
2086 search = btrfs_node_blockptr(node, slot);
2087 blocksize = btrfs_level_size(root, level - 1);
2088 eb = btrfs_find_tree_block(root, search, blocksize);
2090 free_extent_buffer(eb);
2096 nritems = btrfs_header_nritems(node);
2100 if (direction < 0) {
2104 } else if (direction > 0) {
2109 if (path->reada < 0 && objectid) {
2110 btrfs_node_key(node, &disk_key, nr);
2111 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2114 search = btrfs_node_blockptr(node, nr);
2115 if ((search <= target && target - search <= 65536) ||
2116 (search > target && search - target <= 65536)) {
2117 gen = btrfs_node_ptr_generation(node, nr);
2118 readahead_tree_block(root, search, blocksize, gen);
2122 if ((nread > 65536 || nscan > 32))
2128 * returns -EAGAIN if it had to drop the path, or zero if everything was in
2131 static noinline int reada_for_balance(struct btrfs_root *root,
2132 struct btrfs_path *path, int level)
2136 struct extent_buffer *parent;
2137 struct extent_buffer *eb;
2144 parent = path->nodes[level + 1];
2148 nritems = btrfs_header_nritems(parent);
2149 slot = path->slots[level + 1];
2150 blocksize = btrfs_level_size(root, level);
2153 block1 = btrfs_node_blockptr(parent, slot - 1);
2154 gen = btrfs_node_ptr_generation(parent, slot - 1);
2155 eb = btrfs_find_tree_block(root, block1, blocksize);
2157 * if we get -eagain from btrfs_buffer_uptodate, we
2158 * don't want to return eagain here. That will loop
2161 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2163 free_extent_buffer(eb);
2165 if (slot + 1 < nritems) {
2166 block2 = btrfs_node_blockptr(parent, slot + 1);
2167 gen = btrfs_node_ptr_generation(parent, slot + 1);
2168 eb = btrfs_find_tree_block(root, block2, blocksize);
2169 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2171 free_extent_buffer(eb);
2173 if (block1 || block2) {
2176 /* release the whole path */
2177 btrfs_release_path(path);
2179 /* read the blocks */
2181 readahead_tree_block(root, block1, blocksize, 0);
2183 readahead_tree_block(root, block2, blocksize, 0);
2186 eb = read_tree_block(root, block1, blocksize, 0);
2187 free_extent_buffer(eb);
2190 eb = read_tree_block(root, block2, blocksize, 0);
2191 free_extent_buffer(eb);
2199 * when we walk down the tree, it is usually safe to unlock the higher layers
2200 * in the tree. The exceptions are when our path goes through slot 0, because
2201 * operations on the tree might require changing key pointers higher up in the
2204 * callers might also have set path->keep_locks, which tells this code to keep
2205 * the lock if the path points to the last slot in the block. This is part of
2206 * walking through the tree, and selecting the next slot in the higher block.
2208 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2209 * if lowest_unlock is 1, level 0 won't be unlocked
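 * when write_lock_level is non-NULL it is lowered as locks above
 * min_write_lock_level are dropped, so the caller can tell how far up
 * the path is still write locked.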
2211 static noinline void unlock_up(struct btrfs_path *path, int level,
2212 int lowest_unlock, int min_write_lock_level,
2213 int *write_lock_level)
2216 int skip_level = level;
2218 struct extent_buffer *t;
2220 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2221 if (!path->nodes[i])
2223 if (!path->locks[i])
2225 if (!no_skips && path->slots[i] == 0) {
2229 if (!no_skips && path->keep_locks) {
2232 nritems = btrfs_header_nritems(t);
2233 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2238 if (skip_level < i && i >= lowest_unlock)
2242 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2243 btrfs_tree_unlock_rw(t, path->locks[i]);
2245 if (write_lock_level &&
2246 i > min_write_lock_level &&
2247 i <= *write_lock_level) {
2248 *write_lock_level = i - 1;
2255 * This releases any locks held in the path starting at level and
2256 * going all the way up to the root.
2258 * btrfs_search_slot will keep the lock held on higher nodes in a few
2259 * corner cases, such as COW of the block at slot zero in the node. This
2260 * ignores those rules, and it should only be called when there are no
2261 * more updates to be done higher up in the tree.
2263 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2267 if (path->keep_locks)
2270 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2271 if (!path->nodes[i])
2273 if (!path->locks[i])
2275 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2281 * helper function for btrfs_search_slot. The goal is to find a block
2282 * in cache without setting the path to blocking. If we find the block
2283 * we return zero and the path is unchanged.
2285 * If we can't find the block, we set the path blocking and do some
2286 * reada. -EAGAIN is returned and the search must be repeated.
2289 read_block_for_search(struct btrfs_trans_handle *trans,
2290 struct btrfs_root *root, struct btrfs_path *p,
2291 struct extent_buffer **eb_ret, int level, int slot,
2292 struct btrfs_key *key, u64 time_seq)
2297 struct extent_buffer *b = *eb_ret;
2298 struct extent_buffer *tmp;
2301 blocknr = btrfs_node_blockptr(b, slot);
2302 gen = btrfs_node_ptr_generation(b, slot);
2303 blocksize = btrfs_level_size(root, level - 1);
2305 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2307 /* first we do an atomic uptodate check */
2308 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2309 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2311 * we found an up to date block without
2318 /* the pages were up to date, but we failed
2319 * the generation number check. Do a full
2320 * read for the generation number that is correct.
2321 * We must do this without dropping locks so
2322 * we can trust our generation number
2324 free_extent_buffer(tmp);
2325 btrfs_set_path_blocking(p);
2327 /* now we're allowed to do a blocking uptodate check */
2328 tmp = read_tree_block(root, blocknr, blocksize, gen);
2329 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
2333 free_extent_buffer(tmp);
2334 btrfs_release_path(p);
2340 * reduce lock contention at high levels
2341 * of the btree by dropping locks before
2342 * we read. Don't release the lock on the current
2343 * level because we need to walk this node to figure
2344 * out which blocks to read.
2346 btrfs_unlock_up_safe(p, level + 1);
2347 btrfs_set_path_blocking(p);
2349 free_extent_buffer(tmp);
2351 reada_for_search(root, p, level, slot, key->objectid);
2353 btrfs_release_path(p);
2356 tmp = read_tree_block(root, blocknr, blocksize, 0);
2359 * If the read above didn't mark this buffer up to date,
2360 * it will never end up being up to date. Set ret to EIO now
2361 * and give up so that our caller doesn't loop forever
2364 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2366 free_extent_buffer(tmp);
2372 * helper function for btrfs_search_slot. This does all of the checks
2373 * for node-level blocks and does any balancing required based on
2376 * If no extra work was required, zero is returned. If we had to
2377 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2381 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2382 struct btrfs_root *root, struct btrfs_path *p,
2383 struct extent_buffer *b, int level, int ins_len,
2384 int *write_lock_level)
2387 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2388 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2391 if (*write_lock_level < level + 1) {
2392 *write_lock_level = level + 1;
2393 btrfs_release_path(p);
2397 sret = reada_for_balance(root, p, level);
2401 btrfs_set_path_blocking(p);
2402 sret = split_node(trans, root, p, level);
2403 btrfs_clear_path_blocking(p, NULL, 0);
2410 b = p->nodes[level];
2411 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2412 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2415 if (*write_lock_level < level + 1) {
2416 *write_lock_level = level + 1;
2417 btrfs_release_path(p);
2421 sret = reada_for_balance(root, p, level);
2425 btrfs_set_path_blocking(p);
2426 sret = balance_level(trans, root, p, level);
2427 btrfs_clear_path_blocking(p, NULL, 0);
2433 b = p->nodes[level];
2435 btrfs_release_path(p);
2438 BUG_ON(btrfs_header_nritems(b) == 1);
2449 * look for key in the tree. path is filled in with nodes along the way
2450 * if key is found, we return zero and you can find the item in the leaf
2451 * level of the path (level 0)
2453 * If the key isn't found, the path points to the slot where it should
2454 * be inserted, and 1 is returned. If there are other errors during the
2455 * search a negative error number is returned.
2457 * if ins_len > 0, nodes and leaves will be split as we walk down the
2458 tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible).
2461 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2462 *root, struct btrfs_key *key, struct btrfs_path *p, int
2465 struct extent_buffer *b;
2470 int lowest_unlock = 1;
2472 /* everything at write_lock_level or lower must be write locked */
2473 int write_lock_level = 0;
2474 u8 lowest_level = 0;
2475 int min_write_lock_level;
2477 lowest_level = p->lowest_level;
2478 WARN_ON(lowest_level && ins_len > 0);
2479 WARN_ON(p->nodes[0] != NULL);
2484 /* when we are removing items, we might have to go up to level
2485 * two as we update tree pointers. Make sure we keep write locks
2486 * for those levels as well
2488 write_lock_level = 2;
2489 } else if (ins_len > 0) {
2491 * for inserting items, make sure we have a write lock on
2492 * level 1 so we can update keys
2494 write_lock_level = 1;
2498 write_lock_level = -1;
2500 if (cow && (p->keep_locks || p->lowest_level))
2501 write_lock_level = BTRFS_MAX_LEVEL;
2503 min_write_lock_level = write_lock_level;
2507 * we try very hard to do read locks on the root
2509 root_lock = BTRFS_READ_LOCK;
2511 if (p->search_commit_root) {
2513 * the commit roots are read only
2514 * so we always do read locks
2516 b = root->commit_root;
2517 extent_buffer_get(b);
2518 level = btrfs_header_level(b);
2519 if (!p->skip_locking)
2520 btrfs_tree_read_lock(b);
2522 if (p->skip_locking) {
2523 b = btrfs_root_node(root);
2524 level = btrfs_header_level(b);
2526 /* we don't know the level of the root node
2527 * until we actually have it read locked
2529 b = btrfs_read_lock_root_node(root);
2530 level = btrfs_header_level(b);
2531 if (level <= write_lock_level) {
2532 /* whoops, must trade for write lock */
2533 btrfs_tree_read_unlock(b);
2534 free_extent_buffer(b);
2535 b = btrfs_lock_root_node(root);
2536 root_lock = BTRFS_WRITE_LOCK;
2538 /* the level might have changed, check again */
2539 level = btrfs_header_level(b);
2543 p->nodes[level] = b;
2544 if (!p->skip_locking)
2545 p->locks[level] = root_lock;
2548 level = btrfs_header_level(b);
2551 * setup the path here so we can release it under lock
2552 * contention with the cow code
2556 * if we don't really need to cow this block
2557 * then we don't want to set the path blocking,
2558 * so we test it here
2560 if (!should_cow_block(trans, root, b))
2563 btrfs_set_path_blocking(p);
2566 * must have write locks on this node and the parent.
2569 if (level + 1 > write_lock_level) {
2570 write_lock_level = level + 1;
2571 btrfs_release_path(p);
2575 err = btrfs_cow_block(trans, root, b,
2576 p->nodes[level + 1],
2577 p->slots[level + 1], &b);
2584 BUG_ON(!cow && ins_len);
2586 p->nodes[level] = b;
2587 btrfs_clear_path_blocking(p, NULL, 0);
2590 * we have a lock on b and as long as we aren't changing
2591 * the tree, there is no way for the items in b to change.
2592 * It is safe to drop the lock on our parent before we
2593 * go through the expensive btree search on b.
2595 * If cow is true, then we might be changing slot zero,
2596 * which may require changing the parent. So, we can't
2597 * drop the lock until after we know which slot we're
2601 btrfs_unlock_up_safe(p, level + 1);
2603 ret = bin_search(b, key, level, &slot);
2607 if (ret && slot > 0) {
2611 p->slots[level] = slot;
2612 err = setup_nodes_for_search(trans, root, p, b, level,
2613 ins_len, &write_lock_level);
2620 b = p->nodes[level];
2621 slot = p->slots[level];
2624 * slot 0 is special, if we change the key
2625 * we have to update the parent pointer
2626 * which means we must have a write lock on the parent.
2629 if (slot == 0 && cow &&
2630 write_lock_level < level + 1) {
2631 write_lock_level = level + 1;
2632 btrfs_release_path(p);
2636 unlock_up(p, level, lowest_unlock,
2637 min_write_lock_level, &write_lock_level);
2639 if (level == lowest_level) {
2645 err = read_block_for_search(trans, root, p,
2646 &b, level, slot, key, 0);
2654 if (!p->skip_locking) {
2655 level = btrfs_header_level(b);
2656 if (level <= write_lock_level) {
2657 err = btrfs_try_tree_write_lock(b);
2659 btrfs_set_path_blocking(p);
2661 btrfs_clear_path_blocking(p, b,
2664 p->locks[level] = BTRFS_WRITE_LOCK;
2666 err = btrfs_try_tree_read_lock(b);
2668 btrfs_set_path_blocking(p);
2669 btrfs_tree_read_lock(b);
2670 btrfs_clear_path_blocking(p, b,
2673 p->locks[level] = BTRFS_READ_LOCK;
2675 p->nodes[level] = b;
2678 p->slots[level] = slot;
2680 btrfs_leaf_free_space(root, b) < ins_len) {
2681 if (write_lock_level < 1) {
2682 write_lock_level = 1;
2683 btrfs_release_path(p);
2687 btrfs_set_path_blocking(p);
2688 err = split_leaf(trans, root, key,
2689 p, ins_len, ret == 0);
2690 btrfs_clear_path_blocking(p, NULL, 0);
2698 if (!p->search_for_split)
2699 unlock_up(p, level, lowest_unlock,
2700 min_write_lock_level, &write_lock_level);
2707 * we don't really know what they plan on doing with the path
2708 * from here on, so for now just mark it as blocking
2710 if (!p->leave_spinning)
2711 btrfs_set_path_blocking(p);
2713 btrfs_release_path(p);
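/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * plain read-only lookup with btrfs_search_slot.  "ino" and the inode item
 * type are only an example; error handling is trimmed.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	struct btrfs_inode_item *ii;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *	else if (ret == 1)
 *		ret = -ENOENT;
 *	btrfs_free_path(path);
 *
 * ins_len == 0 and cow == 0 give a pure lookup that only takes read locks;
 * passing a transaction handle with ins_len != 0 or cow == 1 is what the
 * splitting, merging and COW handling above is for.
 */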
2718 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2719 * current state of the tree together with the operations recorded in the tree
2720 * modification log to search for the key in a previous version of this tree, as
2721 * denoted by the time_seq parameter.
2723 * Naturally, there is no support for insert, delete or cow operations.
2725 * The resulting path and return value will be set up as if we called
2726 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2728 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2729 struct btrfs_path *p, u64 time_seq)
2731 struct extent_buffer *b;
2736 int lowest_unlock = 1;
2737 u8 lowest_level = 0;
2739 lowest_level = p->lowest_level;
2740 WARN_ON(p->nodes[0] != NULL);
2742 if (p->search_commit_root) {
2744 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2748 b = get_old_root(root, time_seq);
2749 level = btrfs_header_level(b);
2750 p->locks[level] = BTRFS_READ_LOCK;
2753 level = btrfs_header_level(b);
2754 p->nodes[level] = b;
2755 btrfs_clear_path_blocking(p, NULL, 0);
2758 * we have a lock on b and as long as we aren't changing
2759 * the tree, there is no way for the items in b to change.
2760 * It is safe to drop the lock on our parent before we
2761 * go through the expensive btree search on b.
2763 btrfs_unlock_up_safe(p, level + 1);
2765 ret = bin_search(b, key, level, &slot);
2769 if (ret && slot > 0) {
2773 p->slots[level] = slot;
2774 unlock_up(p, level, lowest_unlock, 0, NULL);
2776 if (level == lowest_level) {
2782 err = read_block_for_search(NULL, root, p, &b, level,
2783 slot, key, time_seq);
2791 level = btrfs_header_level(b);
2792 err = btrfs_try_tree_read_lock(b);
2794 btrfs_set_path_blocking(p);
2795 btrfs_tree_read_lock(b);
2796 btrfs_clear_path_blocking(p, b,
2799 p->locks[level] = BTRFS_READ_LOCK;
2800 p->nodes[level] = b;
2801 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2802 if (b != p->nodes[level]) {
2803 btrfs_tree_unlock_rw(p->nodes[level],
2805 p->locks[level] = 0;
2806 p->nodes[level] = b;
2809 p->slots[level] = slot;
2810 unlock_up(p, level, lowest_unlock, 0, NULL);
2816 if (!p->leave_spinning)
2817 btrfs_set_path_blocking(p);
2819 btrfs_release_path(p);
2825 * helper to use instead of search slot if no exact match is needed but
2826 * instead the next or previous item should be returned.
2827 * When find_higher is true, the next higher item is returned, the next lower otherwise.
2829 * When return_any and find_higher are both true, and no higher item is found,
2830 * return the next lower instead.
2831 * When return_any is true and find_higher is false, and no lower item is found,
2832 * return the next higher instead.
2833 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 on error.
2836 int btrfs_search_slot_for_read(struct btrfs_root *root,
2837 struct btrfs_key *key, struct btrfs_path *p,
2838 int find_higher, int return_any)
2841 struct extent_buffer *leaf;
2844 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2848 * a return value of 1 means the path is at the position where the
2849 * item should be inserted. Normally this is the next bigger item,
2850 * but in case the previous item is the last in a leaf, path points
2851 * to the first free slot in the previous leaf, i.e. at an invalid slot.
2857 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2858 ret = btrfs_next_leaf(root, p);
2864 * no higher item found, return the next lower instead
2869 btrfs_release_path(p);
2873 if (p->slots[0] == 0) {
2874 ret = btrfs_prev_leaf(root, p);
2878 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2884 * no lower item found, return the next higher instead
2889 btrfs_release_path(p);
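/*
 * A short usage sketch (hypothetical caller, not part of this file): find
 * the first item at or after @key, falling back to the closest lower item
 * if nothing higher exists.
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *	btrfs_release_path(path);
 */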
2899 * adjust the pointers going up the tree, starting at level
2900 * making sure the right key of each node points to 'key'.
2901 * This is used after shifting pointers to the left, so it stops
2902 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
2906 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2907 struct btrfs_root *root, struct btrfs_path *path,
2908 struct btrfs_disk_key *key, int level)
2911 struct extent_buffer *t;
2913 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2914 int tslot = path->slots[i];
2915 if (!path->nodes[i])
2918 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
2919 btrfs_set_node_key(t, key, tslot);
2920 btrfs_mark_buffer_dirty(path->nodes[i]);
2929 * This function isn't completely safe. It's the caller's responsibility
2930 * to ensure that the new key won't break the sort order
2932 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2933 struct btrfs_root *root, struct btrfs_path *path,
2934 struct btrfs_key *new_key)
2936 struct btrfs_disk_key disk_key;
2937 struct extent_buffer *eb;
2940 eb = path->nodes[0];
2941 slot = path->slots[0];
2943 btrfs_item_key(eb, &disk_key, slot - 1);
2944 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2946 if (slot < btrfs_header_nritems(eb) - 1) {
2947 btrfs_item_key(eb, &disk_key, slot + 1);
2948 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2951 btrfs_cpu_key_to_disk(&disk_key, new_key);
2952 btrfs_set_item_key(eb, &disk_key, slot);
2953 btrfs_mark_buffer_dirty(eb);
2955 fixup_low_keys(trans, root, path, &disk_key, 1);
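/*
 * A small sketch of the intended use (hypothetical, not part of this
 * file): nudge the key of the item under the path while the neighbouring
 * keys still bracket the new value, which is exactly what the checks
 * above verify.
 *
 *	struct btrfs_key new_key = old_key;
 *
 *	new_key.offset = new_offset;
 *	btrfs_set_item_key_safe(trans, root, path, &new_key);
 *
 * new_offset must stay between the previous and next item keys in the
 * leaf, otherwise the assertions above will fire.
 */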
2959 * try to push data from one node into the next node left in the tree.
2962 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2963 * error, and > 0 if there was no room in the left hand block.
2965 static int push_node_left(struct btrfs_trans_handle *trans,
2966 struct btrfs_root *root, struct extent_buffer *dst,
2967 struct extent_buffer *src, int empty)
2974 src_nritems = btrfs_header_nritems(src);
2975 dst_nritems = btrfs_header_nritems(dst);
2976 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2977 WARN_ON(btrfs_header_generation(src) != trans->transid);
2978 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2980 if (!empty && src_nritems <= 8)
2983 if (push_items <= 0)
2987 push_items = min(src_nritems, push_items);
2988 if (push_items < src_nritems) {
2989 /* leave at least 8 pointers in the node if
2990 * we aren't going to empty it
2992 if (src_nritems - push_items < 8) {
2993 if (push_items <= 8)
2999 push_items = min(src_nritems - 8, push_items);
3001 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3003 copy_extent_buffer(dst, src,
3004 btrfs_node_key_ptr_offset(dst_nritems),
3005 btrfs_node_key_ptr_offset(0),
3006 push_items * sizeof(struct btrfs_key_ptr));
3008 if (push_items < src_nritems) {
3010 * don't call tree_mod_log_eb_move here, key removal was already
3011 * fully logged by tree_mod_log_eb_copy above.
3013 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3014 btrfs_node_key_ptr_offset(push_items),
3015 (src_nritems - push_items) *
3016 sizeof(struct btrfs_key_ptr));
3018 btrfs_set_header_nritems(src, src_nritems - push_items);
3019 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3020 btrfs_mark_buffer_dirty(src);
3021 btrfs_mark_buffer_dirty(dst);
3027 * try to push data from one node into the next node right in the tree.
3030 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3031 * error, and > 0 if there was no room in the right hand block.
3033 * this will only push up to 1/2 the contents of the left node over
3035 static int balance_node_right(struct btrfs_trans_handle *trans,
3036 struct btrfs_root *root,
3037 struct extent_buffer *dst,
3038 struct extent_buffer *src)
3046 WARN_ON(btrfs_header_generation(src) != trans->transid);
3047 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3049 src_nritems = btrfs_header_nritems(src);
3050 dst_nritems = btrfs_header_nritems(dst);
3051 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3052 if (push_items <= 0)
3055 if (src_nritems < 4)
3058 max_push = src_nritems / 2 + 1;
3059 /* don't try to empty the node */
3060 if (max_push >= src_nritems)
3063 if (max_push < push_items)
3064 push_items = max_push;
3066 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3067 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3068 btrfs_node_key_ptr_offset(0),
3070 sizeof(struct btrfs_key_ptr));
3072 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3073 src_nritems - push_items, push_items);
3074 copy_extent_buffer(dst, src,
3075 btrfs_node_key_ptr_offset(0),
3076 btrfs_node_key_ptr_offset(src_nritems - push_items),
3077 push_items * sizeof(struct btrfs_key_ptr));
3079 btrfs_set_header_nritems(src, src_nritems - push_items);
3080 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3082 btrfs_mark_buffer_dirty(src);
3083 btrfs_mark_buffer_dirty(dst);
3089 * helper function to insert a new root level in the tree.
3090 * A new node is allocated, and a single item is inserted to
3091 * point to the existing root
3093 * returns zero on success or < 0 on failure.
3095 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3096 struct btrfs_root *root,
3097 struct btrfs_path *path, int level)
3100 struct extent_buffer *lower;
3101 struct extent_buffer *c;
3102 struct extent_buffer *old;
3103 struct btrfs_disk_key lower_key;
3105 BUG_ON(path->nodes[level]);
3106 BUG_ON(path->nodes[level-1] != root->node);
3108 lower = path->nodes[level-1];
3110 btrfs_item_key(lower, &lower_key, 0);
3112 btrfs_node_key(lower, &lower_key, 0);
3114 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3115 root->root_key.objectid, &lower_key,
3116 level, root->node->start, 0);
3120 root_add_used(root, root->nodesize);
3122 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3123 btrfs_set_header_nritems(c, 1);
3124 btrfs_set_header_level(c, level);
3125 btrfs_set_header_bytenr(c, c->start);
3126 btrfs_set_header_generation(c, trans->transid);
3127 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3128 btrfs_set_header_owner(c, root->root_key.objectid);
3130 write_extent_buffer(c, root->fs_info->fsid,
3131 (unsigned long)btrfs_header_fsid(c),
3134 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3135 (unsigned long)btrfs_header_chunk_tree_uuid(c),
3138 btrfs_set_node_key(c, &lower_key, 0);
3139 btrfs_set_node_blockptr(c, 0, lower->start);
3140 lower_gen = btrfs_header_generation(lower);
3141 WARN_ON(lower_gen != trans->transid);
3143 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3145 btrfs_mark_buffer_dirty(c);
3148 tree_mod_log_set_root_pointer(root, c);
3149 rcu_assign_pointer(root->node, c);
3151 /* the super has an extra ref to root->node */
3152 free_extent_buffer(old);
3154 add_root_to_dirty_list(root);
3155 extent_buffer_get(c);
3156 path->nodes[level] = c;
3157 path->locks[level] = BTRFS_WRITE_LOCK;
3158 path->slots[level] = 0;
3163 * worker function to insert a single pointer in a node.
3164 * the node should have enough room for the pointer already
3166 * slot and level indicate where you want the key to go, and
3167 * blocknr is the block the key points to.
3169 static void insert_ptr(struct btrfs_trans_handle *trans,
3170 struct btrfs_root *root, struct btrfs_path *path,
3171 struct btrfs_disk_key *key, u64 bytenr,
3172 int slot, int level)
3174 struct extent_buffer *lower;
3178 BUG_ON(!path->nodes[level]);
3179 btrfs_assert_tree_locked(path->nodes[level]);
3180 lower = path->nodes[level];
3181 nritems = btrfs_header_nritems(lower);
3182 BUG_ON(slot > nritems);
3183 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3184 if (slot != nritems) {
3186 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3187 slot, nritems - slot);
3188 memmove_extent_buffer(lower,
3189 btrfs_node_key_ptr_offset(slot + 1),
3190 btrfs_node_key_ptr_offset(slot),
3191 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3194 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3198 btrfs_set_node_key(lower, key, slot);
3199 btrfs_set_node_blockptr(lower, slot, bytenr);
3200 WARN_ON(trans->transid == 0);
3201 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3202 btrfs_set_header_nritems(lower, nritems + 1);
3203 btrfs_mark_buffer_dirty(lower);
3207 * split the node at the specified level in path in two.
3208 * The path is corrected to point to the appropriate node after the split
3210 * Before splitting this tries to make some room in the node by pushing
3211 * left and right, if either one works, it returns right away.
3213 * returns 0 on success and < 0 on failure
3215 static noinline int split_node(struct btrfs_trans_handle *trans,
3216 struct btrfs_root *root,
3217 struct btrfs_path *path, int level)
3219 struct extent_buffer *c;
3220 struct extent_buffer *split;
3221 struct btrfs_disk_key disk_key;
3226 c = path->nodes[level];
3227 WARN_ON(btrfs_header_generation(c) != trans->transid);
3228 if (c == root->node) {
3229 /* trying to split the root, let's make a new one */
3230 ret = insert_new_root(trans, root, path, level + 1);
3234 ret = push_nodes_for_insert(trans, root, path, level);
3235 c = path->nodes[level];
3236 if (!ret && btrfs_header_nritems(c) <
3237 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3243 c_nritems = btrfs_header_nritems(c);
3244 mid = (c_nritems + 1) / 2;
3245 btrfs_node_key(c, &disk_key, mid);
3247 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3248 root->root_key.objectid,
3249 &disk_key, level, c->start, 0);
3251 return PTR_ERR(split);
3253 root_add_used(root, root->nodesize);
3255 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3256 btrfs_set_header_level(split, btrfs_header_level(c));
3257 btrfs_set_header_bytenr(split, split->start);
3258 btrfs_set_header_generation(split, trans->transid);
3259 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3260 btrfs_set_header_owner(split, root->root_key.objectid);
3261 write_extent_buffer(split, root->fs_info->fsid,
3262 (unsigned long)btrfs_header_fsid(split),
3264 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3265 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3268 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3269 copy_extent_buffer(split, c,
3270 btrfs_node_key_ptr_offset(0),
3271 btrfs_node_key_ptr_offset(mid),
3272 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3273 btrfs_set_header_nritems(split, c_nritems - mid);
3274 btrfs_set_header_nritems(c, mid);
3277 btrfs_mark_buffer_dirty(c);
3278 btrfs_mark_buffer_dirty(split);
3280 insert_ptr(trans, root, path, &disk_key, split->start,
3281 path->slots[level + 1] + 1, level + 1);
3283 if (path->slots[level] >= mid) {
3284 path->slots[level] -= mid;
3285 btrfs_tree_unlock(c);
3286 free_extent_buffer(c);
3287 path->nodes[level] = split;
3288 path->slots[level + 1] += 1;
3290 btrfs_tree_unlock(split);
3291 free_extent_buffer(split);
3297 * how many bytes are required to store the items in a leaf. start
3298 * and nr indicate which items in the leaf to check. This totals up the
3299 * space used both by the item structs and the item data
3301 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3304 int nritems = btrfs_header_nritems(l);
3305 int end = min(nritems, start + nr) - 1;
3309 data_len = btrfs_item_end_nr(l, start);
3310 data_len = data_len - btrfs_item_offset_nr(l, end);
3311 data_len += sizeof(struct btrfs_item) * nr;
3312 WARN_ON(data_len < 0);
3317 * The space between the end of the leaf items and
3318 * the start of the leaf data. IOW, how much room
3319 * the leaf has left for both items and data
3321 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3322 struct extent_buffer *leaf)
3324 int nritems = btrfs_header_nritems(leaf);
3326 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3328 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3329 "used %d nritems %d\n",
3330 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3331 leaf_space_used(leaf, 0, nritems), nritems);
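/*
 * In other words, for a leaf holding nritems items the free space is
 *
 *	BTRFS_LEAF_DATA_SIZE(root)
 *		- (total bytes of item data)
 *		- nritems * sizeof(struct btrfs_item)
 *
 * because every item costs both its data bytes at the end of the block
 * and a struct btrfs_item header at the front.
 */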
3337 * min slot controls the lowest index we're willing to push to the
3338 * right. We'll push up to and including min_slot, but no lower
3340 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3341 struct btrfs_root *root,
3342 struct btrfs_path *path,
3343 int data_size, int empty,
3344 struct extent_buffer *right,
3345 int free_space, u32 left_nritems,
3348 struct extent_buffer *left = path->nodes[0];
3349 struct extent_buffer *upper = path->nodes[1];
3350 struct btrfs_map_token token;
3351 struct btrfs_disk_key disk_key;
3356 struct btrfs_item *item;
3362 btrfs_init_map_token(&token);
3367 nr = max_t(u32, 1, min_slot);
3369 if (path->slots[0] >= left_nritems)
3370 push_space += data_size;
3372 slot = path->slots[1];
3373 i = left_nritems - 1;
3375 item = btrfs_item_nr(left, i);
3377 if (!empty && push_items > 0) {
3378 if (path->slots[0] > i)
3380 if (path->slots[0] == i) {
3381 int space = btrfs_leaf_free_space(root, left);
3382 if (space + push_space * 2 > free_space)
3387 if (path->slots[0] == i)
3388 push_space += data_size;
3390 this_item_size = btrfs_item_size(left, item);
3391 if (this_item_size + sizeof(*item) + push_space > free_space)
3395 push_space += this_item_size + sizeof(*item);
3401 if (push_items == 0)
3404 if (!empty && push_items == left_nritems)
3407 /* push left to right */
3408 right_nritems = btrfs_header_nritems(right);
3410 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3411 push_space -= leaf_data_end(root, left);
3413 /* make room in the right data area */
3414 data_end = leaf_data_end(root, right);
3415 memmove_extent_buffer(right,
3416 btrfs_leaf_data(right) + data_end - push_space,
3417 btrfs_leaf_data(right) + data_end,
3418 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3420 /* copy from the left data area */
3421 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3422 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3423 btrfs_leaf_data(left) + leaf_data_end(root, left),
3426 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3427 btrfs_item_nr_offset(0),
3428 right_nritems * sizeof(struct btrfs_item));
3430 /* copy the items from left to right */
3431 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3432 btrfs_item_nr_offset(left_nritems - push_items),
3433 push_items * sizeof(struct btrfs_item));
3435 /* update the item pointers */
3436 right_nritems += push_items;
3437 btrfs_set_header_nritems(right, right_nritems);
3438 push_space = BTRFS_LEAF_DATA_SIZE(root);
3439 for (i = 0; i < right_nritems; i++) {
3440 item = btrfs_item_nr(right, i);
3441 push_space -= btrfs_token_item_size(right, item, &token);
3442 btrfs_set_token_item_offset(right, item, push_space, &token);
3445 left_nritems -= push_items;
3446 btrfs_set_header_nritems(left, left_nritems);
3449 btrfs_mark_buffer_dirty(left);
3451 clean_tree_block(trans, root, left);
3453 btrfs_mark_buffer_dirty(right);
3455 btrfs_item_key(right, &disk_key, 0);
3456 btrfs_set_node_key(upper, &disk_key, slot + 1);
3457 btrfs_mark_buffer_dirty(upper);
3459 /* then fixup the leaf pointer in the path */
3460 if (path->slots[0] >= left_nritems) {
3461 path->slots[0] -= left_nritems;
3462 if (btrfs_header_nritems(path->nodes[0]) == 0)
3463 clean_tree_block(trans, root, path->nodes[0]);
3464 btrfs_tree_unlock(path->nodes[0]);
3465 free_extent_buffer(path->nodes[0]);
3466 path->nodes[0] = right;
3467 path->slots[1] += 1;
3469 btrfs_tree_unlock(right);
3470 free_extent_buffer(right);
3475 btrfs_tree_unlock(right);
3476 free_extent_buffer(right);
3481 * push some data in the path leaf to the right, trying to free up at
3482 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3484 * returns 1 if the push failed because the other node didn't have enough
3485 * room, 0 if everything worked out and < 0 if there were major errors.
3487 * this will push starting from min_slot to the end of the leaf. It won't
3488 * push any slot lower than min_slot
3490 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3491 *root, struct btrfs_path *path,
3492 int min_data_size, int data_size,
3493 int empty, u32 min_slot)
3495 struct extent_buffer *left = path->nodes[0];
3496 struct extent_buffer *right;
3497 struct extent_buffer *upper;
3503 if (!path->nodes[1])
3506 slot = path->slots[1];
3507 upper = path->nodes[1];
3508 if (slot >= btrfs_header_nritems(upper) - 1)
3511 btrfs_assert_tree_locked(path->nodes[1]);
3513 right = read_node_slot(root, upper, slot + 1);
3517 btrfs_tree_lock(right);
3518 btrfs_set_lock_blocking(right);
3520 free_space = btrfs_leaf_free_space(root, right);
3521 if (free_space < data_size)
3524 /* cow and double check */
3525 ret = btrfs_cow_block(trans, root, right, upper,
3530 free_space = btrfs_leaf_free_space(root, right);
3531 if (free_space < data_size)
3534 left_nritems = btrfs_header_nritems(left);
3535 if (left_nritems == 0)
3538 return __push_leaf_right(trans, root, path, min_data_size, empty,
3539 right, free_space, left_nritems, min_slot);
3541 btrfs_tree_unlock(right);
3542 free_extent_buffer(right);
3547 * push some data in the path leaf to the left, trying to free up at
3548 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3550 * max_slot can put a limit on how far into the leaf we'll push items. The
3551 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the items.
3554 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3555 struct btrfs_root *root,
3556 struct btrfs_path *path, int data_size,
3557 int empty, struct extent_buffer *left,
3558 int free_space, u32 right_nritems,
3561 struct btrfs_disk_key disk_key;
3562 struct extent_buffer *right = path->nodes[0];
3566 struct btrfs_item *item;
3567 u32 old_left_nritems;
3571 u32 old_left_item_size;
3572 struct btrfs_map_token token;
3574 btrfs_init_map_token(&token);
3577 nr = min(right_nritems, max_slot);
3579 nr = min(right_nritems - 1, max_slot);
3581 for (i = 0; i < nr; i++) {
3582 item = btrfs_item_nr(right, i);
3584 if (!empty && push_items > 0) {
3585 if (path->slots[0] < i)
3587 if (path->slots[0] == i) {
3588 int space = btrfs_leaf_free_space(root, right);
3589 if (space + push_space * 2 > free_space)
3594 if (path->slots[0] == i)
3595 push_space += data_size;
3597 this_item_size = btrfs_item_size(right, item);
3598 if (this_item_size + sizeof(*item) + push_space > free_space)
3602 push_space += this_item_size + sizeof(*item);
3605 if (push_items == 0) {
3609 if (!empty && push_items == btrfs_header_nritems(right))
3612 /* push data from right to left */
3613 copy_extent_buffer(left, right,
3614 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3615 btrfs_item_nr_offset(0),
3616 push_items * sizeof(struct btrfs_item));
3618 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3619 btrfs_item_offset_nr(right, push_items - 1);
3621 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3622 leaf_data_end(root, left) - push_space,
3623 btrfs_leaf_data(right) +
3624 btrfs_item_offset_nr(right, push_items - 1),
3626 old_left_nritems = btrfs_header_nritems(left);
3627 BUG_ON(old_left_nritems <= 0);
3629 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3630 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3633 item = btrfs_item_nr(left, i);
3635 ioff = btrfs_token_item_offset(left, item, &token);
3636 btrfs_set_token_item_offset(left, item,
3637 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3640 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3642 /* fixup right node */
3643 if (push_items > right_nritems) {
3644 printk(KERN_CRIT "push items %d nr %u\n", push_items,
3649 if (push_items < right_nritems) {
3650 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3651 leaf_data_end(root, right);
3652 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3653 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3654 btrfs_leaf_data(right) +
3655 leaf_data_end(root, right), push_space);
3657 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3658 btrfs_item_nr_offset(push_items),
3659 (btrfs_header_nritems(right) - push_items) *
3660 sizeof(struct btrfs_item));
3662 right_nritems -= push_items;
3663 btrfs_set_header_nritems(right, right_nritems);
3664 push_space = BTRFS_LEAF_DATA_SIZE(root);
3665 for (i = 0; i < right_nritems; i++) {
3666 item = btrfs_item_nr(right, i);
3668 push_space = push_space - btrfs_token_item_size(right,
3670 btrfs_set_token_item_offset(right, item, push_space, &token);
3673 btrfs_mark_buffer_dirty(left);
3675 btrfs_mark_buffer_dirty(right);
3677 clean_tree_block(trans, root, right);
3679 btrfs_item_key(right, &disk_key, 0);
3680 fixup_low_keys(trans, root, path, &disk_key, 1);
3682 /* then fixup the leaf pointer in the path */
3683 if (path->slots[0] < push_items) {
3684 path->slots[0] += old_left_nritems;
3685 btrfs_tree_unlock(path->nodes[0]);
3686 free_extent_buffer(path->nodes[0]);
3687 path->nodes[0] = left;
3688 path->slots[1] -= 1;
3690 btrfs_tree_unlock(left);
3691 free_extent_buffer(left);
3692 path->slots[0] -= push_items;
3694 BUG_ON(path->slots[0] < 0);
3697 btrfs_tree_unlock(left);
3698 free_extent_buffer(left);
3703 * push some data in the path leaf to the left, trying to free up at
3704 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3706 * max_slot can put a limit on how far into the leaf we'll push items. The
3707 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3710 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3711 *root, struct btrfs_path *path, int min_data_size,
3712 int data_size, int empty, u32 max_slot)
3714 struct extent_buffer *right = path->nodes[0];
3715 struct extent_buffer *left;
3721 slot = path->slots[1];
3724 if (!path->nodes[1])
3727 right_nritems = btrfs_header_nritems(right);
3728 if (right_nritems == 0)
3731 btrfs_assert_tree_locked(path->nodes[1]);
3733 left = read_node_slot(root, path->nodes[1], slot - 1);
3737 btrfs_tree_lock(left);
3738 btrfs_set_lock_blocking(left);
3740 free_space = btrfs_leaf_free_space(root, left);
3741 if (free_space < data_size) {
3746 /* cow and double check */
3747 ret = btrfs_cow_block(trans, root, left,
3748 path->nodes[1], slot - 1, &left);
3750 /* we hit -ENOSPC, but it isn't fatal here */
3756 free_space = btrfs_leaf_free_space(root, left);
3757 if (free_space < data_size) {
3762 return __push_leaf_left(trans, root, path, min_data_size,
3763 empty, left, free_space, right_nritems,
3766 btrfs_tree_unlock(left);
3767 free_extent_buffer(left);
3772 * split the path's leaf in two, making sure there is at least data_size
3773 * available for the resulting leaf level of the path.
3775 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3776 struct btrfs_root *root,
3777 struct btrfs_path *path,
3778 struct extent_buffer *l,
3779 struct extent_buffer *right,
3780 int slot, int mid, int nritems)
3785 struct btrfs_disk_key disk_key;
3786 struct btrfs_map_token token;
3788 btrfs_init_map_token(&token);
3790 nritems = nritems - mid;
3791 btrfs_set_header_nritems(right, nritems);
3792 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3794 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3795 btrfs_item_nr_offset(mid),
3796 nritems * sizeof(struct btrfs_item));
3798 copy_extent_buffer(right, l,
3799 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3800 data_copy_size, btrfs_leaf_data(l) +
3801 leaf_data_end(root, l), data_copy_size);
3803 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3804 btrfs_item_end_nr(l, mid);
3806 for (i = 0; i < nritems; i++) {
3807 struct btrfs_item *item = btrfs_item_nr(right, i);
3810 ioff = btrfs_token_item_offset(right, item, &token);
3811 btrfs_set_token_item_offset(right, item,
3812 ioff + rt_data_off, &token);
3815 btrfs_set_header_nritems(l, mid);
3816 btrfs_item_key(right, &disk_key, 0);
3817 insert_ptr(trans, root, path, &disk_key, right->start,
3818 path->slots[1] + 1, 1);
3820 btrfs_mark_buffer_dirty(right);
3821 btrfs_mark_buffer_dirty(l);
3822 BUG_ON(path->slots[0] != slot);
3825 btrfs_tree_unlock(path->nodes[0]);
3826 free_extent_buffer(path->nodes[0]);
3827 path->nodes[0] = right;
3828 path->slots[0] -= mid;
3829 path->slots[1] += 1;
3831 btrfs_tree_unlock(right);
3832 free_extent_buffer(right);
3835 BUG_ON(path->slots[0] < 0);
3839 * double splits happen when we need to insert a big item in the middle
3840 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3841 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3844 * We avoid this by trying to push the items on either side of our target
3845 * into the adjacent leaves. If all goes well we can avoid the double split completely.
3848 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3849 struct btrfs_root *root,
3850 struct btrfs_path *path,
3858 slot = path->slots[0];
3861 * try to push all the items after our slot into the next leaf
3864 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3871 nritems = btrfs_header_nritems(path->nodes[0]);
3873 * our goal is to get our slot at the start or end of a leaf. If
3874 * we've done so we're done
3876 if (path->slots[0] == 0 || path->slots[0] == nritems)
3879 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3882 /* try to push all the items before our slot into the next leaf */
3883 slot = path->slots[0];
3884 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3897 * split the path's leaf in two, making sure there is at least data_size
3898 * available for the resulting leaf level of the path.
3900 * returns 0 if all went well and < 0 on failure.
3902 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3903 struct btrfs_root *root,
3904 struct btrfs_key *ins_key,
3905 struct btrfs_path *path, int data_size,
3908 struct btrfs_disk_key disk_key;
3909 struct extent_buffer *l;
3913 struct extent_buffer *right;
3917 int num_doubles = 0;
3918 int tried_avoid_double = 0;
3921 slot = path->slots[0];
3922 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3923 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3926 /* first try to make some room by pushing left and right */
3928 wret = push_leaf_right(trans, root, path, data_size,
3933 wret = push_leaf_left(trans, root, path, data_size,
3934 data_size, 0, (u32)-1);
3940 /* did the pushes work? */
3941 if (btrfs_leaf_free_space(root, l) >= data_size)
3945 if (!path->nodes[1]) {
3946 ret = insert_new_root(trans, root, path, 1);
3953 slot = path->slots[0];
3954 nritems = btrfs_header_nritems(l);
3955 mid = (nritems + 1) / 2;
3959 leaf_space_used(l, mid, nritems - mid) + data_size >
3960 BTRFS_LEAF_DATA_SIZE(root)) {
3961 if (slot >= nritems) {
3965 if (mid != nritems &&
3966 leaf_space_used(l, mid, nritems - mid) +
3967 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3968 if (data_size && !tried_avoid_double)
3969 goto push_for_double;
3975 if (leaf_space_used(l, 0, mid) + data_size >
3976 BTRFS_LEAF_DATA_SIZE(root)) {
3977 if (!extend && data_size && slot == 0) {
3979 } else if ((extend || !data_size) && slot == 0) {
3983 if (mid != nritems &&
3984 leaf_space_used(l, mid, nritems - mid) +
3985 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3986 if (data_size && !tried_avoid_double)
3987 goto push_for_double;
3995 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3997 btrfs_item_key(l, &disk_key, mid);
3999 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4000 root->root_key.objectid,
4001 &disk_key, 0, l->start, 0);
4003 return PTR_ERR(right);
4005 root_add_used(root, root->leafsize);
4007 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4008 btrfs_set_header_bytenr(right, right->start);
4009 btrfs_set_header_generation(right, trans->transid);
4010 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4011 btrfs_set_header_owner(right, root->root_key.objectid);
4012 btrfs_set_header_level(right, 0);
4013 write_extent_buffer(right, root->fs_info->fsid,
4014 (unsigned long)btrfs_header_fsid(right),
4017 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4018 (unsigned long)btrfs_header_chunk_tree_uuid(right),
4023 btrfs_set_header_nritems(right, 0);
4024 insert_ptr(trans, root, path, &disk_key, right->start,
4025 path->slots[1] + 1, 1);
4026 btrfs_tree_unlock(path->nodes[0]);
4027 free_extent_buffer(path->nodes[0]);
4028 path->nodes[0] = right;
4030 path->slots[1] += 1;
4032 btrfs_set_header_nritems(right, 0);
4033 insert_ptr(trans, root, path, &disk_key, right->start,
4035 btrfs_tree_unlock(path->nodes[0]);
4036 free_extent_buffer(path->nodes[0]);
4037 path->nodes[0] = right;
4039 if (path->slots[1] == 0)
4040 fixup_low_keys(trans, root, path,
4043 btrfs_mark_buffer_dirty(right);
4047 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4050 BUG_ON(num_doubles != 0);
4058 push_for_double_split(trans, root, path, data_size);
4059 tried_avoid_double = 1;
4060 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4065 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4066 struct btrfs_root *root,
4067 struct btrfs_path *path, int ins_len)
4069 struct btrfs_key key;
4070 struct extent_buffer *leaf;
4071 struct btrfs_file_extent_item *fi;
4076 leaf = path->nodes[0];
4077 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4079 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4080 key.type != BTRFS_EXTENT_CSUM_KEY);
4082 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4085 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4086 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4087 fi = btrfs_item_ptr(leaf, path->slots[0],
4088 struct btrfs_file_extent_item);
4089 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4091 btrfs_release_path(path);
4093 path->keep_locks = 1;
4094 path->search_for_split = 1;
4095 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4096 path->search_for_split = 0;
4101 leaf = path->nodes[0];
4102 /* if our item isn't there or got smaller, return now */
4103 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4106 /* the leaf has changed, it now has room. return now */
4107 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4110 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4111 fi = btrfs_item_ptr(leaf, path->slots[0],
4112 struct btrfs_file_extent_item);
4113 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4117 btrfs_set_path_blocking(path);
4118 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4122 path->keep_locks = 0;
4123 btrfs_unlock_up_safe(path, 1);
4126 path->keep_locks = 0;
4130 static noinline int split_item(struct btrfs_trans_handle *trans,
4131 struct btrfs_root *root,
4132 struct btrfs_path *path,
4133 struct btrfs_key *new_key,
4134 unsigned long split_offset)
4136 struct extent_buffer *leaf;
4137 struct btrfs_item *item;
4138 struct btrfs_item *new_item;
4144 struct btrfs_disk_key disk_key;
4146 leaf = path->nodes[0];
4147 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4149 btrfs_set_path_blocking(path);
4151 item = btrfs_item_nr(leaf, path->slots[0]);
4152 orig_offset = btrfs_item_offset(leaf, item);
4153 item_size = btrfs_item_size(leaf, item);
4155 buf = kmalloc(item_size, GFP_NOFS);
4159 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4160 path->slots[0]), item_size);
4162 slot = path->slots[0] + 1;
4163 nritems = btrfs_header_nritems(leaf);
4164 if (slot != nritems) {
4165 /* shift the items */
4166 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4167 btrfs_item_nr_offset(slot),
4168 (nritems - slot) * sizeof(struct btrfs_item));
4171 btrfs_cpu_key_to_disk(&disk_key, new_key);
4172 btrfs_set_item_key(leaf, &disk_key, slot);
4174 new_item = btrfs_item_nr(leaf, slot);
4176 btrfs_set_item_offset(leaf, new_item, orig_offset);
4177 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4179 btrfs_set_item_offset(leaf, item,
4180 orig_offset + item_size - split_offset);
4181 btrfs_set_item_size(leaf, item, split_offset);
4183 btrfs_set_header_nritems(leaf, nritems + 1);
4185 /* write the data for the start of the original item */
4186 write_extent_buffer(leaf, buf,
4187 btrfs_item_ptr_offset(leaf, path->slots[0]),
4190 /* write the data for the new item */
4191 write_extent_buffer(leaf, buf + split_offset,
4192 btrfs_item_ptr_offset(leaf, slot),
4193 item_size - split_offset);
4194 btrfs_mark_buffer_dirty(leaf);
4196 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4202 * This function splits a single item into two items,
4203 * giving 'new_key' to the new item and splitting the
4204 * old one at split_offset (from the start of the item).
4206 * The path may be released by this operation. After
4207 * the split, the path is pointing to the old item. The
4208 * new item is going to be in the same node as the old one.
4210 * Note, the item being split must be small enough to live alone on
4211 * a tree block with room for one extra struct btrfs_item
4213 * This allows us to split the item in place, keeping a lock on the
4214 * leaf the entire time.
4216 int btrfs_split_item(struct btrfs_trans_handle *trans,
4217 struct btrfs_root *root,
4218 struct btrfs_path *path,
4219 struct btrfs_key *new_key,
4220 unsigned long split_offset)
4223 ret = setup_leaf_for_split(trans, root, path,
4224 sizeof(struct btrfs_item));
4228 ret = split_item(trans, root, path, new_key, split_offset);
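/*
 * A usage sketch (hypothetical caller, not part of this file): carve the
 * tail of the item under the path into a second item.  Afterwards the
 * original item keeps the first @split_offset bytes and the new item,
 * carrying @new_key, holds the remainder; the path still points at the
 * original item.
 *
 *	new_key = orig_key;
 *	new_key.offset = orig_key.offset + split_offset;
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * The offset arithmetic is only an example of choosing a new key that
 * keeps the leaf sorted.
 */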
4233 * This function duplicates an item, giving 'new_key' to the new item.
4234 * It guarantees both items live in the same tree leaf and the new item
4235 * is contiguous with the original item.
4237 * This allows us to split a file extent in place, keeping a lock on the
4238 * leaf the entire time.
4240 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4241 struct btrfs_root *root,
4242 struct btrfs_path *path,
4243 struct btrfs_key *new_key)
4245 struct extent_buffer *leaf;
4249 leaf = path->nodes[0];
4250 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4251 ret = setup_leaf_for_split(trans, root, path,
4252 item_size + sizeof(struct btrfs_item));
4257 setup_items_for_insert(trans, root, path, new_key, &item_size,
4258 item_size, item_size +
4259 sizeof(struct btrfs_item), 1);
4260 leaf = path->nodes[0];
4261 memcpy_extent_buffer(leaf,
4262 btrfs_item_ptr_offset(leaf, path->slots[0]),
4263 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4269 * make the item pointed to by the path smaller. new_size indicates
4270 * how small to make it, and from_end tells us if we just chop bytes
4271 * off the end of the item or if we shift the item to chop bytes off the front.
4274 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4275 struct btrfs_root *root,
4276 struct btrfs_path *path,
4277 u32 new_size, int from_end)
4280 struct extent_buffer *leaf;
4281 struct btrfs_item *item;
4283 unsigned int data_end;
4284 unsigned int old_data_start;
4285 unsigned int old_size;
4286 unsigned int size_diff;
4288 struct btrfs_map_token token;
4290 btrfs_init_map_token(&token);
4292 leaf = path->nodes[0];
4293 slot = path->slots[0];
4295 old_size = btrfs_item_size_nr(leaf, slot);
4296 if (old_size == new_size)
4299 nritems = btrfs_header_nritems(leaf);
4300 data_end = leaf_data_end(root, leaf);
4302 old_data_start = btrfs_item_offset_nr(leaf, slot);
4304 size_diff = old_size - new_size;
4307 BUG_ON(slot >= nritems);
4310 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4312 /* first correct the data pointers */
4313 for (i = slot; i < nritems; i++) {
4315 item = btrfs_item_nr(leaf, i);
4317 ioff = btrfs_token_item_offset(leaf, item, &token);
4318 btrfs_set_token_item_offset(leaf, item,
4319 ioff + size_diff, &token);
4322 /* shift the data */
4324 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4325 data_end + size_diff, btrfs_leaf_data(leaf) +
4326 data_end, old_data_start + new_size - data_end);
4328 struct btrfs_disk_key disk_key;
4331 btrfs_item_key(leaf, &disk_key, slot);
4333 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4335 struct btrfs_file_extent_item *fi;
4337 fi = btrfs_item_ptr(leaf, slot,
4338 struct btrfs_file_extent_item);
4339 fi = (struct btrfs_file_extent_item *)(
4340 (unsigned long)fi - size_diff);
4342 if (btrfs_file_extent_type(leaf, fi) ==
4343 BTRFS_FILE_EXTENT_INLINE) {
4344 ptr = btrfs_item_ptr_offset(leaf, slot);
4345 memmove_extent_buffer(leaf, ptr,
4347 offsetof(struct btrfs_file_extent_item,
4352 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4353 data_end + size_diff, btrfs_leaf_data(leaf) +
4354 data_end, old_data_start - data_end);
4356 offset = btrfs_disk_key_offset(&disk_key);
4357 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4358 btrfs_set_item_key(leaf, &disk_key, slot);
4360 fixup_low_keys(trans, root, path, &disk_key, 1);
4363 item = btrfs_item_nr(leaf, slot);
4364 btrfs_set_item_size(leaf, item, new_size);
4365 btrfs_mark_buffer_dirty(leaf);
4367 if (btrfs_leaf_free_space(root, leaf) < 0) {
4368 btrfs_print_leaf(root, leaf);
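/*
 * A one-line usage sketch (hypothetical, not part of this file): shrink
 * the item under the path to @new_size bytes, dropping bytes from the end.
 *
 *	btrfs_truncate_item(trans, root, path, new_size, 1);
 *
 * Passing from_end == 0 instead drops bytes from the front; in that case
 * the code above also shifts the item's key offset and, when the item is
 * in slot 0, fixes up the parent keys.
 */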
4274 * make the item pointed to by the path bigger, data_size is the number of bytes to add.
4376 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4377 struct btrfs_root *root, struct btrfs_path *path,
4381 struct extent_buffer *leaf;
4382 struct btrfs_item *item;
4384 unsigned int data_end;
4385 unsigned int old_data;
4386 unsigned int old_size;
4388 struct btrfs_map_token token;
4390 btrfs_init_map_token(&token);
4392 leaf = path->nodes[0];
4394 nritems = btrfs_header_nritems(leaf);
4395 data_end = leaf_data_end(root, leaf);
4397 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4398 btrfs_print_leaf(root, leaf);
4401 slot = path->slots[0];
4402 old_data = btrfs_item_end_nr(leaf, slot);
4405 if (slot >= nritems) {
4406 btrfs_print_leaf(root, leaf);
4407 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4413 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4415 /* first correct the data pointers */
4416 for (i = slot; i < nritems; i++) {
4418 item = btrfs_item_nr(leaf, i);
4420 ioff = btrfs_token_item_offset(leaf, item, &token);
4421 btrfs_set_token_item_offset(leaf, item,
4422 ioff - data_size, &token);
4425 /* shift the data */
4426 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4427 data_end - data_size, btrfs_leaf_data(leaf) +
4428 data_end, old_data - data_end);
4430 data_end = old_data;
4431 old_size = btrfs_item_size_nr(leaf, slot);
4432 item = btrfs_item_nr(leaf, slot);
4433 btrfs_set_item_size(leaf, item, old_size + data_size);
4434 btrfs_mark_buffer_dirty(leaf);
4436 if (btrfs_leaf_free_space(root, leaf) < 0) {
4437 btrfs_print_leaf(root, leaf);
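/*
 * A usage sketch (hypothetical, not part of this file): grow the item
 * under the path by @extra bytes and fill the new tail in place.  "data"
 * and "extra" are placeholders for the caller's buffer and size.
 *
 *	btrfs_extend_item(trans, root, path, extra);
 *	leaf = path->nodes[0];
 *	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 *	old_size = btrfs_item_size_nr(leaf, path->slots[0]) - extra;
 *	write_extent_buffer(leaf, data, ptr + old_size, extra);
 *	btrfs_mark_buffer_dirty(leaf);
 *
 * The caller is expected to have made room first; the free space check at
 * the top of the function is not a graceful failure path.
 */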
4443 * this is a helper for btrfs_insert_empty_items, the main goal here is
4444 * to save stack depth by doing the bulk of the work in a function
4445 * that doesn't call btrfs_search_slot
4447 void setup_items_for_insert(struct btrfs_trans_handle *trans,
4448 struct btrfs_root *root, struct btrfs_path *path,
4449 struct btrfs_key *cpu_key, u32 *data_size,
4450 u32 total_data, u32 total_size, int nr)
4452 struct btrfs_item *item;
4455 unsigned int data_end;
4456 struct btrfs_disk_key disk_key;
4457 struct extent_buffer *leaf;
4459 struct btrfs_map_token token;
4461 btrfs_init_map_token(&token);
4463 leaf = path->nodes[0];
4464 slot = path->slots[0];
4466 nritems = btrfs_header_nritems(leaf);
4467 data_end = leaf_data_end(root, leaf);
4469 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4470 btrfs_print_leaf(root, leaf);
4471 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4472 total_size, btrfs_leaf_free_space(root, leaf));
4476 if (slot != nritems) {
4477 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4479 if (old_data < data_end) {
4480 btrfs_print_leaf(root, leaf);
4481 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4482 slot, old_data, data_end);
4486 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4488 /* first correct the data pointers */
4489 for (i = slot; i < nritems; i++) {
4492 item = btrfs_item_nr(leaf, i);
4493 ioff = btrfs_token_item_offset(leaf, item, &token);
4494 btrfs_set_token_item_offset(leaf, item,
4495 ioff - total_data, &token);
4497 /* shift the items */
4498 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4499 btrfs_item_nr_offset(slot),
4500 (nritems - slot) * sizeof(struct btrfs_item));
4502 /* shift the data */
4503 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4504 data_end - total_data, btrfs_leaf_data(leaf) +
4505 data_end, old_data - data_end);
4506 data_end = old_data;
4509 /* setup the item for the new data */
4510 for (i = 0; i < nr; i++) {
4511 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4512 btrfs_set_item_key(leaf, &disk_key, slot + i);
4513 item = btrfs_item_nr(leaf, slot + i);
4514 btrfs_set_token_item_offset(leaf, item,
4515 data_end - data_size[i], &token);
4516 data_end -= data_size[i];
4517 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4520 btrfs_set_header_nritems(leaf, nritems + nr);
4523 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4524 fixup_low_keys(trans, root, path, &disk_key, 1);
4526 btrfs_unlock_up_safe(path, 1);
4527 btrfs_mark_buffer_dirty(leaf);
4529 if (btrfs_leaf_free_space(root, leaf) < 0) {
4530 btrfs_print_leaf(root, leaf);
4536 * Given a key and some data, insert items into the tree.
4537 * This does all the path init required, making room in the tree if needed.
4539 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4540 struct btrfs_root *root,
4541 struct btrfs_path *path,
4542 struct btrfs_key *cpu_key, u32 *data_size,
4551 for (i = 0; i < nr; i++)
4552 total_data += data_size[i];
4554 total_size = total_data + (nr * sizeof(struct btrfs_item));
4555 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4561 slot = path->slots[0];
4564 setup_items_for_insert(trans, root, path, cpu_key, data_size,
4565 total_data, total_size, nr);
4570 * Given a key and some data, insert an item into the tree.
4571 * This does all the path init required, making room in the tree if needed.
4573 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4574 *root, struct btrfs_key *cpu_key, void *data, u32
4578 struct btrfs_path *path;
4579 struct extent_buffer *leaf;
4582 path = btrfs_alloc_path();
4585 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4587 leaf = path->nodes[0];
4588 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4589 write_extent_buffer(leaf, data, ptr, data_size);
4590 btrfs_mark_buffer_dirty(leaf);
4592 btrfs_free_path(path);
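/*
 * A sketch of the zero-copy alternative (hypothetical, not part of this
 * file): reserve the item with btrfs_insert_empty_item() and build it
 * directly in the leaf instead of copying a prebuilt buffer as above.
 *
 *	ret = btrfs_insert_empty_item(trans, root, path, &key, item_size);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 *		... fill the item at ptr with the btrfs_set_* helpers ...
 *		btrfs_mark_buffer_dirty(leaf);
 *	}
 *	btrfs_release_path(path);
 */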
4597 * delete the pointer from a given node.
4599 * the tree should have been previously balanced so the deletion does not empty a node.
4602 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4603 struct btrfs_path *path, int level, int slot,
4606 struct extent_buffer *parent = path->nodes[level];
4610 nritems = btrfs_header_nritems(parent);
4611 if (slot != nritems - 1) {
4612 if (tree_mod_log && level)
4613 tree_mod_log_eb_move(root->fs_info, parent, slot,
4614 slot + 1, nritems - slot - 1);
4615 memmove_extent_buffer(parent,
4616 btrfs_node_key_ptr_offset(slot),
4617 btrfs_node_key_ptr_offset(slot + 1),
4618 sizeof(struct btrfs_key_ptr) *
4619 (nritems - slot - 1));
4620 } else if (tree_mod_log && level) {
4621 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4622 MOD_LOG_KEY_REMOVE);
4627 btrfs_set_header_nritems(parent, nritems);
4628 if (nritems == 0 && parent == root->node) {
4629 BUG_ON(btrfs_header_level(root->node) != 1);
4630 /* just turn the root into a leaf and break */
4631 btrfs_set_header_level(root->node, 0);
4632 } else if (slot == 0) {
4633 struct btrfs_disk_key disk_key;
4635 btrfs_node_key(parent, &disk_key, 0);
4636 fixup_low_keys(trans, root, path, &disk_key, level + 1);
4638 btrfs_mark_buffer_dirty(parent);
4642 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4645 * This deletes the pointer in path->nodes[1] and frees the leaf
4646 * block extent. zero is returned if it all worked out, < 0 otherwise.
4648 * The path must have already been setup for deleting the leaf, including
4649 * all the proper balancing. path->nodes[1] must be locked.
4651 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4652 struct btrfs_root *root,
4653 struct btrfs_path *path,
4654 struct extent_buffer *leaf)
4656 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4657 del_ptr(trans, root, path, 1, path->slots[1], 1);
4660 * btrfs_free_extent is expensive, we want to make sure we
4661 * aren't holding any locks when we call it
4663 btrfs_unlock_up_safe(path, 0);
4665 root_sub_used(root, leaf->len);
4667 extent_buffer_get(leaf);
4668 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4669 free_extent_buffer_stale(leaf);
4672 * delete the item at the leaf level in path. If that empties
4673 * the leaf, remove it from the tree
4675 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4676 struct btrfs_path *path, int slot, int nr)
4678 struct extent_buffer *leaf;
4679 struct btrfs_item *item;
4686 struct btrfs_map_token token;
4688 btrfs_init_map_token(&token);
4690 leaf = path->nodes[0];
4691 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4693 for (i = 0; i < nr; i++)
4694 dsize += btrfs_item_size_nr(leaf, slot + i);
4696 nritems = btrfs_header_nritems(leaf);
4698 if (slot + nr != nritems) {
4699 int data_end = leaf_data_end(root, leaf);
4701 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4703 btrfs_leaf_data(leaf) + data_end,
4704 last_off - data_end);
4706 for (i = slot + nr; i < nritems; i++) {
4709 item = btrfs_item_nr(leaf, i);
4710 ioff = btrfs_token_item_offset(leaf, item, &token);
4711 btrfs_set_token_item_offset(leaf, item,
4712 ioff + dsize, &token);
4715 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4716 btrfs_item_nr_offset(slot + nr),
4717 sizeof(struct btrfs_item) *
4718 (nritems - slot - nr));
4720 btrfs_set_header_nritems(leaf, nritems - nr);
4723 /* delete the leaf if we've emptied it */
4725 if (leaf == root->node) {
4726 btrfs_set_header_level(leaf, 0);
4728 btrfs_set_path_blocking(path);
4729 clean_tree_block(trans, root, leaf);
4730 btrfs_del_leaf(trans, root, path, leaf);
4733 int used = leaf_space_used(leaf, 0, nritems);
4735 struct btrfs_disk_key disk_key;
4737 btrfs_item_key(leaf, &disk_key, 0);
4738 fixup_low_keys(trans, root, path, &disk_key, 1);
4741 /* delete the leaf if it is mostly empty */
4742 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4743 /* push_leaf_left fixes the path.
4744 * make sure the path still points to our leaf
4745 * for possible call to del_ptr below
4747 slot = path->slots[1];
4748 extent_buffer_get(leaf);
4750 btrfs_set_path_blocking(path);
4751 wret = push_leaf_left(trans, root, path, 1, 1,
4753 if (wret < 0 && wret != -ENOSPC)
4756 if (path->nodes[0] == leaf &&
4757 btrfs_header_nritems(leaf)) {
4758 wret = push_leaf_right(trans, root, path, 1,
4760 if (wret < 0 && wret != -ENOSPC)
4764 if (btrfs_header_nritems(leaf) == 0) {
4765 path->slots[1] = slot;
4766 btrfs_del_leaf(trans, root, path, leaf);
4767 free_extent_buffer(leaf);
4770 /* if we're still in the path, make sure
4771 * we're dirty. Otherwise, one of the
4772 * push_leaf functions must have already
4773 * dirtied this buffer
4775 if (path->nodes[0] == leaf)
4776 btrfs_mark_buffer_dirty(leaf);
4777 free_extent_buffer(leaf);
4780 btrfs_mark_buffer_dirty(leaf);
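/*
 * A usage sketch (hypothetical caller, not part of this file): look an
 * item up with a path prepared for deletion and remove it.
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_items(trans, root, path,
 *				      path->slots[0], 1);
 *	btrfs_release_path(path);
 *
 * ins_len == -1 asks btrfs_search_slot to balance nearly empty nodes on
 * the way down, which is what the del_ptr comment above relies on.
 */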
4787 * search the tree again to find a leaf with lesser keys
4788 * returns 0 if it found something or 1 if there are no lesser leaves.
4789 * returns < 0 on io errors.
4791 * This may release the path, and so you may lose any locks held at the time of the call.
4794 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4796 struct btrfs_key key;
4797 struct btrfs_disk_key found_key;
4800 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4804 else if (key.type > 0)
4806 else if (key.objectid > 0)
4811 btrfs_release_path(path);
4812 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4815 btrfs_item_key(path->nodes[0], &found_key, 0);
4816 ret = comp_keys(&found_key, &key);
4823 * A helper function to walk down the tree starting at min_key, and looking
4824 * for nodes or leaves that are either in cache or have a minimum
4825 * transaction id. This is used by the btree defrag code, and tree logging
4827 * This does not cow, but it does stuff the starting key it finds back
4828 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4829 * key and get a writable path.
4831 * This does lock as it descends, and path->keep_locks should be set
4832 * to 1 by the caller.
4834 * This honors path->lowest_level to prevent descent past a given level of the tree.
4837 * min_trans indicates the oldest transaction that you are interested
4838 * in walking through. Any nodes or leaves older than min_trans are
4839 * skipped over (without reading them).
4841 * returns zero if something useful was found, < 0 on error and 1 if there
4842 * was nothing in the tree that matched the search criteria.
4844 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4845 struct btrfs_key *max_key,
4846 struct btrfs_path *path, int cache_only,
4849 struct extent_buffer *cur;
4850 struct btrfs_key found_key;
4857 WARN_ON(!path->keep_locks);
4859 cur = btrfs_read_lock_root_node(root);
4860 level = btrfs_header_level(cur);
4861 WARN_ON(path->nodes[level]);
4862 path->nodes[level] = cur;
4863 path->locks[level] = BTRFS_READ_LOCK;
4865 if (btrfs_header_generation(cur) < min_trans) {
4870 nritems = btrfs_header_nritems(cur);
4871 level = btrfs_header_level(cur);
4872 sret = bin_search(cur, min_key, level, &slot);
4874 /* at the lowest level, we're done; set up the path and exit */
4875 if (level == path->lowest_level) {
4876 if (slot >= nritems)
4879 path->slots[level] = slot;
4880 btrfs_item_key_to_cpu(cur, &found_key, slot);
4883 if (sret && slot > 0)
4886 * check this node pointer against the cache_only and
4887 * min_trans parameters. If it isn't in cache or is too
4888 * old, skip to the next one.
4890 while (slot < nritems) {
4893 struct extent_buffer *tmp;
4894 struct btrfs_disk_key disk_key;
4896 blockptr = btrfs_node_blockptr(cur, slot);
4897 gen = btrfs_node_ptr_generation(cur, slot);
4898 if (gen < min_trans) {
4906 btrfs_node_key(cur, &disk_key, slot);
4907 if (comp_keys(&disk_key, max_key) >= 0) {
4913 tmp = btrfs_find_tree_block(root, blockptr,
4914 btrfs_level_size(root, level - 1));
4916 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
4917 free_extent_buffer(tmp);
4921 free_extent_buffer(tmp);
4926 * we didn't find a candidate key in this node, walk forward
4927 * and find another one
4929 if (slot >= nritems) {
4930 path->slots[level] = slot;
4931 btrfs_set_path_blocking(path);
4932 sret = btrfs_find_next_key(root, path, min_key, level,
4933 cache_only, min_trans);
4935 btrfs_release_path(path);
4941 /* save our key so it can be handed back in min_key */
4942 btrfs_node_key_to_cpu(cur, &found_key, slot);
4943 path->slots[level] = slot;
4944 if (level == path->lowest_level) {
4946 unlock_up(path, level, 1, 0, NULL);
4949 btrfs_set_path_blocking(path);
4950 cur = read_node_slot(root, cur, slot);
4951 BUG_ON(!cur); /* -ENOMEM */
4953 btrfs_tree_read_lock(cur);
4955 path->locks[level - 1] = BTRFS_READ_LOCK;
4956 path->nodes[level - 1] = cur;
4957 unlock_up(path, level, 1, 0, NULL);
4958 btrfs_clear_path_blocking(path, NULL, 0);
4962 memcpy(min_key, &found_key, sizeof(found_key));
4963 btrfs_set_path_blocking(path);
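/*
 * Illustrative sketch, not part of the original file: scanning all items in
 * blocks newer than a given transid with btrfs_search_forward(). The helper
 * name example_scan_newer and the key-advancing logic are assumptions for
 * this example; the keep_locks requirement and the min_key/max_key contract
 * come from the function above.
 */
static int example_scan_newer(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 };
	struct btrfs_key max_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	path->keep_locks = 1;	/* required by btrfs_search_forward */

	while (1) {
		ret = btrfs_search_forward(root, &min_key, &max_key, path,
					   0, min_trans);
		if (ret) {
			if (ret > 0)	/* nothing new enough was left */
				ret = 0;
			break;
		}

		/* min_key now holds the key that was found; the item sits at
		 * path->nodes[0], path->slots[0] and can be inspected here */

		btrfs_release_path(path);

		/* step min_key to the next possible key and continue */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}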
4967 static void tree_move_down(struct btrfs_root *root,
4968 struct btrfs_path *path,
4969 int *level, int root_level)
4971 BUG_ON(*level == 0);
4972 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4973 path->slots[*level]);
4974 path->slots[*level - 1] = 0;
4978 static int tree_move_next_or_upnext(struct btrfs_root *root,
4979 struct btrfs_path *path,
4980 int *level, int root_level)
4984 nritems = btrfs_header_nritems(path->nodes[*level]);
4986 path->slots[*level]++;
4988 while (path->slots[*level] >= nritems) {
4989 if (*level == root_level)
4993 path->slots[*level] = 0;
4994 free_extent_buffer(path->nodes[*level]);
4995 path->nodes[*level] = NULL;
4997 path->slots[*level]++;
4999 nritems = btrfs_header_nritems(path->nodes[*level]);
5006 * Returns 1 if it had to move up and next. 0 is returned if it moved only next or down.
5009 static int tree_advance(struct btrfs_root *root,
5010 struct btrfs_path *path,
5011 int *level, int root_level,
5013 struct btrfs_key *key)
5017 if (*level == 0 || !allow_down) {
5018 ret = tree_move_next_or_upnext(root, path, level, root_level);
5020 tree_move_down(root, path, level, root_level);
5025 btrfs_item_key_to_cpu(path->nodes[*level], key,
5026 path->slots[*level]);
5028 btrfs_node_key_to_cpu(path->nodes[*level], key,
5029 path->slots[*level]);
5034 static int tree_compare_item(struct btrfs_root *left_root,
5035 struct btrfs_path *left_path,
5036 struct btrfs_path *right_path,
5041 unsigned long off1, off2;
5043 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5044 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5048 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5049 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5050 right_path->slots[0]);
5052 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5054 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5061 #define ADVANCE_ONLY_NEXT -1
5064 * This function compares two trees and calls the provided callback for
5065 * every changed/new/deleted item it finds.
5066 * If shared tree blocks are encountered, whole subtrees are skipped, making
5067 * the compare pretty fast on snapshotted subvolumes.
5069 * This currently works on commit roots only. As commit roots are read only,
5070 * we don't do any locking. The commit roots are protected with transactions.
5071 * Transactions are ended and rejoined when a commit is tried in between.
5073 * This function checks for modifications done to the trees while comparing.
5074 * If it detects a change, it aborts immediately.
5076 int btrfs_compare_trees(struct btrfs_root *left_root,
5077 struct btrfs_root *right_root,
5078 btrfs_changed_cb_t changed_cb, void *ctx)
5082 struct btrfs_trans_handle *trans = NULL;
5083 struct btrfs_path *left_path = NULL;
5084 struct btrfs_path *right_path = NULL;
5085 struct btrfs_key left_key;
5086 struct btrfs_key right_key;
5087 char *tmp_buf = NULL;
5088 int left_root_level;
5089 int right_root_level;
5092 int left_end_reached;
5093 int right_end_reached;
5098 u64 left_start_ctransid;
5099 u64 right_start_ctransid;
5102 left_path = btrfs_alloc_path();
5107 right_path = btrfs_alloc_path();
5113 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5119 left_path->search_commit_root = 1;
5120 left_path->skip_locking = 1;
5121 right_path->search_commit_root = 1;
5122 right_path->skip_locking = 1;
5124 spin_lock(&left_root->root_times_lock);
5125 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5126 spin_unlock(&left_root->root_times_lock);
5128 spin_lock(&right_root->root_times_lock);
5129 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5130 spin_unlock(&right_root->root_times_lock);
5132 trans = btrfs_join_transaction(left_root);
5133 if (IS_ERR(trans)) {
5134 ret = PTR_ERR(trans);
5140 * Strategy: Go to the first items of both trees. Then do
5142 * If both trees are at level 0
5143 * Compare keys of current items
5144 * If left < right treat left item as new, advance left tree and repeat
5146 * If left > right treat right item as deleted, advance right tree and repeat
5148 * If left == right do deep compare of items, treat as changed if
5149 * needed, advance both trees and repeat
5150 * If both trees are at the same level but not at level 0
5151 * Compare keys of current nodes/leaves
5152 * If left < right advance left tree and repeat
5153 * If left > right advance right tree and repeat
5154 * If left == right compare blockptrs of the next nodes/leaves
5155 * If they match advance both trees but stay at the same level
5157 * If they don't match, advance both trees while allowing the walk to go deeper
5159 * If tree levels are different
5160 * Advance the tree that needs it and repeat
5162 * Advancing a tree means:
5163 * If we are at level 0, try to go to the next slot. If that's not
5164 * possible, go one level up and repeat. Stop when we find a level
5165 * where we can go to the next slot. We may at this point be on a node or a leaf.
5168 * If we are not at level 0 and not on shared tree blocks, go one level deeper.
5171 * If we are not at level 0 and on shared tree blocks, go one slot to
5172 * the right if possible or go up and right.
5175 left_level = btrfs_header_level(left_root->commit_root);
5176 left_root_level = left_level;
5177 left_path->nodes[left_level] = left_root->commit_root;
5178 extent_buffer_get(left_path->nodes[left_level]);
5180 right_level = btrfs_header_level(right_root->commit_root);
5181 right_root_level = right_level;
5182 right_path->nodes[right_level] = right_root->commit_root;
5183 extent_buffer_get(right_path->nodes[right_level]);
5185 if (left_level == 0)
5186 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5187 &left_key, left_path->slots[left_level]);
5189 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5190 &left_key, left_path->slots[left_level]);
5191 if (right_level == 0)
5192 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5193 &right_key, right_path->slots[right_level]);
5195 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5196 &right_key, right_path->slots[right_level]);
5198 left_end_reached = right_end_reached = 0;
5199 advance_left = advance_right = 0;
5203 * We need to make sure the transaction does not get committed
5204 * while we do anything on commit roots. This means we need to
5205 * join and leave transactions for every item that we process.
5207 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5208 btrfs_release_path(left_path);
5209 btrfs_release_path(right_path);
5211 ret = btrfs_end_transaction(trans, left_root);
5216 /* now rejoin the transaction */
5218 trans = btrfs_join_transaction(left_root);
5219 if (IS_ERR(trans)) {
5220 ret = PTR_ERR(trans);
5225 spin_lock(&left_root->root_times_lock);
5226 ctransid = btrfs_root_ctransid(&left_root->root_item);
5227 spin_unlock(&left_root->root_times_lock);
5228 if (ctransid != left_start_ctransid)
5229 left_start_ctransid = 0;
5231 spin_lock(&right_root->root_times_lock);
5232 ctransid = btrfs_root_ctransid(&right_root->root_item);
5233 spin_unlock(&right_root->root_times_lock);
5234 if (ctransid != right_start_ctransid)
5235 right_start_ctransid = 0;
5237 if (!left_start_ctransid || !right_start_ctransid) {
5238 WARN(1, KERN_WARNING
5239 "btrfs: btrfs_compare_tree detected "
5240 "a change in one of the trees while "
5241 "iterating. This is probably a "
5248 * the commit root may have changed, so start again
5251 left_path->lowest_level = left_level;
5252 right_path->lowest_level = right_level;
5253 ret = btrfs_search_slot(NULL, left_root,
5254 &left_key, left_path, 0, 0);
5257 ret = btrfs_search_slot(NULL, right_root,
5258 &right_key, right_path, 0, 0);
5263 if (advance_left && !left_end_reached) {
5264 ret = tree_advance(left_root, left_path, &left_level,
5266 advance_left != ADVANCE_ONLY_NEXT,
5269 left_end_reached = ADVANCE;
5272 if (advance_right && !right_end_reached) {
5273 ret = tree_advance(right_root, right_path, &right_level,
5275 advance_right != ADVANCE_ONLY_NEXT,
5278 right_end_reached = ADVANCE;
5282 if (left_end_reached && right_end_reached) {
5285 } else if (left_end_reached) {
5286 if (right_level == 0) {
5287 ret = changed_cb(left_root, right_root,
5288 left_path, right_path,
5290 BTRFS_COMPARE_TREE_DELETED,
5295 advance_right = ADVANCE;
5297 } else if (right_end_reached) {
5298 if (left_level == 0) {
5299 ret = changed_cb(left_root, right_root,
5300 left_path, right_path,
5302 BTRFS_COMPARE_TREE_NEW,
5307 advance_left = ADVANCE;
5311 if (left_level == 0 && right_level == 0) {
5312 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5314 ret = changed_cb(left_root, right_root,
5315 left_path, right_path,
5317 BTRFS_COMPARE_TREE_NEW,
5321 advance_left = ADVANCE;
5322 } else if (cmp > 0) {
5323 ret = changed_cb(left_root, right_root,
5324 left_path, right_path,
5326 BTRFS_COMPARE_TREE_DELETED,
5330 advance_right = ADVANCE;
5332 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5333 ret = tree_compare_item(left_root, left_path,
5334 right_path, tmp_buf);
5336 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5337 ret = changed_cb(left_root, right_root,
5338 left_path, right_path,
5340 BTRFS_COMPARE_TREE_CHANGED,
5345 advance_left = ADVANCE;
5346 advance_right = ADVANCE;
5348 } else if (left_level == right_level) {
5349 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5351 advance_left = ADVANCE;
5352 } else if (cmp > 0) {
5353 advance_right = ADVANCE;
5355 left_blockptr = btrfs_node_blockptr(
5356 left_path->nodes[left_level],
5357 left_path->slots[left_level]);
5358 right_blockptr = btrfs_node_blockptr(
5359 right_path->nodes[right_level],
5360 right_path->slots[right_level]);
5361 if (left_blockptr == right_blockptr) {
5363 * As we're on a shared block, don't
5364 * allow the walk to go any deeper.
5366 advance_left = ADVANCE_ONLY_NEXT;
5367 advance_right = ADVANCE_ONLY_NEXT;
5369 advance_left = ADVANCE;
5370 advance_right = ADVANCE;
5373 } else if (left_level < right_level) {
5374 advance_right = ADVANCE;
5376 advance_left = ADVANCE;
5381 btrfs_free_path(left_path);
5382 btrfs_free_path(right_path);
5387 ret = btrfs_end_transaction(trans, left_root);
5389 btrfs_end_transaction(trans, left_root);
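/*
 * Illustrative sketch, not part of the original file: the shape of a
 * callback handed to btrfs_compare_trees(). The send code uses a callback
 * of this form; the name example_changed_cb and the empty case bodies are
 * assumptions made for this example.
 */
static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		/* key exists only in the left tree */
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		/* key exists only in the right tree */
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		/* key exists in both trees, but the item data differs */
		break;
	}
	/* return 0 to keep comparing; a negative value aborts the walk */
	return 0;
}

/*
 * A caller would then diff two snapshots roughly like this:
 *
 *	ret = btrfs_compare_trees(send_root, parent_root,
 *				  example_changed_cb, ctx);
 */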
5396 * this is similar to btrfs_next_leaf, but does not try to preserve
5397 * and fix up the path. It looks for and returns the next key in the
5398 * tree based on the current path and the cache_only and min_trans parameters.
5401 * 0 is returned if another key is found, < 0 if there are any errors
5402 * and 1 is returned if there are no higher keys in the tree
5404 * path->keep_locks should be set to 1 on the search made before
5405 * calling this function.
5407 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5408 struct btrfs_key *key, int level,
5409 int cache_only, u64 min_trans)
5412 struct extent_buffer *c;
5414 WARN_ON(!path->keep_locks);
5415 while (level < BTRFS_MAX_LEVEL) {
5416 if (!path->nodes[level])
5419 slot = path->slots[level] + 1;
5420 c = path->nodes[level];
5422 if (slot >= btrfs_header_nritems(c)) {
5425 struct btrfs_key cur_key;
5426 if (level + 1 >= BTRFS_MAX_LEVEL ||
5427 !path->nodes[level + 1])
5430 if (path->locks[level + 1]) {
5435 slot = btrfs_header_nritems(c) - 1;
5437 btrfs_item_key_to_cpu(c, &cur_key, slot);
5439 btrfs_node_key_to_cpu(c, &cur_key, slot);
5441 orig_lowest = path->lowest_level;
5442 btrfs_release_path(path);
5443 path->lowest_level = level;
5444 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5446 path->lowest_level = orig_lowest;
5450 c = path->nodes[level];
5451 slot = path->slots[level];
5458 btrfs_item_key_to_cpu(c, key, slot);
5460 u64 blockptr = btrfs_node_blockptr(c, slot);
5461 u64 gen = btrfs_node_ptr_generation(c, slot);
5464 struct extent_buffer *cur;
5465 cur = btrfs_find_tree_block(root, blockptr,
5466 btrfs_level_size(root, level - 1));
5468 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
5471 free_extent_buffer(cur);
5474 free_extent_buffer(cur);
5476 if (gen < min_trans) {
5480 btrfs_node_key_to_cpu(c, key, slot);
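/*
 * Illustrative sketch, not part of the original file: peeking at the key
 * that follows the current path position without advancing the path. The
 * helper name example_peek_next_key is an assumption; the keep_locks
 * requirement comes from the WARN_ON in btrfs_find_next_key() above.
 */
static int example_peek_next_key(struct btrfs_root *root,
				 struct btrfs_key *search_key,
				 struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* keep upper levels locked so find_next_key can climb safely */
	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret < 0)
		goto out;

	/*
	 * level 0, no cache_only filter, no minimum transid. Note that if
	 * search_key was not found, "next" is relative to the slot where it
	 * would have been inserted.
	 */
	ret = btrfs_find_next_key(root, path, next_key, 0, 0, 0);
out:
	btrfs_free_path(path);
	return ret;	/* 0: next_key filled in, 1: no higher key, < 0: error */
}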
5488 * search the tree again to find a leaf with greater keys
5489 * returns 0 if it found something or 1 if there are no greater leaves.
5490 * returns < 0 on io errors.
5492 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5494 return btrfs_next_old_leaf(root, path, 0);
5497 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5502 struct extent_buffer *c;
5503 struct extent_buffer *next;
5504 struct btrfs_key key;
5507 int old_spinning = path->leave_spinning;
5508 int next_rw_lock = 0;
5510 nritems = btrfs_header_nritems(path->nodes[0]);
5514 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5519 btrfs_release_path(path);
5521 path->keep_locks = 1;
5522 path->leave_spinning = 1;
5525 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5527 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5528 path->keep_locks = 0;
5533 nritems = btrfs_header_nritems(path->nodes[0]);
5535 * by releasing the path above we dropped all our locks. A balance
5536 * could have added more items next to the key that used to be
5537 * at the very end of the block. So, check again here and
5538 * advance the path if there are now more items available.
5540 if (nritems > 0 && path->slots[0] < nritems - 1) {
5547 while (level < BTRFS_MAX_LEVEL) {
5548 if (!path->nodes[level]) {
5553 slot = path->slots[level] + 1;
5554 c = path->nodes[level];
5555 if (slot >= btrfs_header_nritems(c)) {
5557 if (level == BTRFS_MAX_LEVEL) {
5565 btrfs_tree_unlock_rw(next, next_rw_lock);
5566 free_extent_buffer(next);
5570 next_rw_lock = path->locks[level];
5571 ret = read_block_for_search(NULL, root, path, &next, level,
5577 btrfs_release_path(path);
5581 if (!path->skip_locking) {
5582 ret = btrfs_try_tree_read_lock(next);
5583 if (!ret && time_seq) {
5585 * If we don't get the lock, we may be racing
5586 * with push_leaf_left, holding that lock while
5587 * itself waiting for the leaf we've currently
5588 * locked. To solve this situation, we give up
5589 * on our lock and cycle.
5591 free_extent_buffer(next);
5592 btrfs_release_path(path);
5597 btrfs_set_path_blocking(path);
5598 btrfs_tree_read_lock(next);
5599 btrfs_clear_path_blocking(path, next,
5602 next_rw_lock = BTRFS_READ_LOCK;
5606 path->slots[level] = slot;
5609 c = path->nodes[level];
5610 if (path->locks[level])
5611 btrfs_tree_unlock_rw(c, path->locks[level]);
5613 free_extent_buffer(c);
5614 path->nodes[level] = next;
5615 path->slots[level] = 0;
5616 if (!path->skip_locking)
5617 path->locks[level] = next_rw_lock;
5621 ret = read_block_for_search(NULL, root, path, &next, level,
5627 btrfs_release_path(path);
5631 if (!path->skip_locking) {
5632 ret = btrfs_try_tree_read_lock(next);
5634 btrfs_set_path_blocking(path);
5635 btrfs_tree_read_lock(next);
5636 btrfs_clear_path_blocking(path, next,
5639 next_rw_lock = BTRFS_READ_LOCK;
5644 unlock_up(path, 0, 1, 0, NULL);
5645 path->leave_spinning = old_spinning;
5647 btrfs_set_path_blocking(path);
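/*
 * Illustrative sketch, not part of the original file: the common forward
 * iteration pattern that pairs btrfs_search_slot() with btrfs_next_leaf().
 * The helper name example_walk_objectid and the "process the item" step are
 * assumptions made for this example; the calls themselves are the real API.
 */
static int example_walk_objectid(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	/* read-only search: no transaction handle and cow = 0 */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != objectid)
			break;

		/* ... process the item at path->slots[0] here ... */

		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}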
5653 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5654 * searching until it gets past min_objectid or finds an item of 'type'
5656 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5658 int btrfs_previous_item(struct btrfs_root *root,
5659 struct btrfs_path *path, u64 min_objectid,
5662 struct btrfs_key found_key;
5663 struct extent_buffer *leaf;
5668 if (path->slots[0] == 0) {
5669 btrfs_set_path_blocking(path);
5670 ret = btrfs_prev_leaf(root, path);
5676 leaf = path->nodes[0];
5677 nritems = btrfs_header_nritems(leaf);
5680 if (path->slots[0] == nritems)
5683 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5684 if (found_key.objectid < min_objectid)
5686 if (found_key.type == type)
5688 if (found_key.objectid == min_objectid &&
5689 found_key.type < type)