2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include <linux/vmalloc.h>
25 #include "transaction.h"
26 #include "print-tree.h"
29 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
32 *root, struct btrfs_key *ins_key,
33 struct btrfs_path *path, int data_size, int extend);
34 static int push_node_left(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root, struct extent_buffer *dst,
36 struct extent_buffer *src, int empty);
37 static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct btrfs_root *root,
39 struct extent_buffer *dst_buf,
40 struct extent_buffer *src_buf);
41 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
43 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
44 struct extent_buffer *eb);
46 struct btrfs_path *btrfs_alloc_path(void)
48 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
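
/*
 * Example (illustrative sketch, not taken from this file): the usual
 * lifecycle of a path as seen by callers of this API. Error handling is
 * trimmed and the root/key used are placeholders.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	... use path->nodes[0] and path->slots[0] ...
 *	btrfs_release_path(path);	(drops locks and extent buffer refs)
 *	btrfs_free_path(path);		(also releases the path)
 */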
52 * set all locked nodes in the path to blocking locks. This should
53 * be done before scheduling
55 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
58 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
59 if (!p->nodes[i] || !p->locks[i])
61 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
62 if (p->locks[i] == BTRFS_READ_LOCK)
63 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
64 else if (p->locks[i] == BTRFS_WRITE_LOCK)
65 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy: when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path. You can safely use NULL
 * for held.
77 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
78 struct extent_buffer *held, int held_rw)
83 btrfs_set_lock_blocking_rw(held, held_rw);
84 if (held_rw == BTRFS_WRITE_LOCK)
85 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
86 else if (held_rw == BTRFS_READ_LOCK)
87 held_rw = BTRFS_READ_LOCK_BLOCKING;
89 btrfs_set_path_blocking(p);
91 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
92 if (p->nodes[i] && p->locks[i]) {
93 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
94 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
95 p->locks[i] = BTRFS_WRITE_LOCK;
96 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
97 p->locks[i] = BTRFS_READ_LOCK;
102 btrfs_clear_lock_blocking_rw(held, held_rw);
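
/*
 * Example (illustrative sketch) of how the blocking/spinning helpers above
 * are paired around an operation that may sleep; the same pattern appears
 * later in this file around split_node() and balance_level():
 *
 *	btrfs_set_path_blocking(p);
 *	... do something that can schedule, e.g. read or split a block ...
 *	btrfs_clear_path_blocking(p, NULL, 0);
 */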
105 /* this also releases the path */
106 void btrfs_free_path(struct btrfs_path *p)
110 btrfs_release_path(p);
111 kmem_cache_free(btrfs_path_cachep, p);
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
120 noinline void btrfs_release_path(struct btrfs_path *p)
124 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
129 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
132 free_extent_buffer(p->nodes[i]);
138 * safely gets a reference on the root node of a tree. A lock
139 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree. See btrfs_lock_root_node for the
 * looping required.
143 * The extent buffer returned by this has a reference taken, so
144 * it won't disappear. It may stop being the root of the tree
145 * at any time because there are no locks held.
147 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
149 struct extent_buffer *eb;
153 eb = rcu_dereference(root->node);
		 * RCU really hurts here: we could free up the root node because
		 * it was COWed, but we may not get the new root node yet, so do
		 * the inc_not_zero dance; if it doesn't work, then
		 * synchronize_rcu and try again.
161 if (atomic_inc_not_zero(&eb->refs)) {
171 /* loop around taking references on and locking the root node of the
172 * tree until you end up with a lock on the root. A locked buffer
173 * is returned, with a reference held.
175 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
177 struct extent_buffer *eb;
180 eb = btrfs_root_node(root);
182 if (eb == root->node)
184 btrfs_tree_unlock(eb);
185 free_extent_buffer(eb);
190 /* loop around taking references on and locking the root node of the
191 * tree until you end up with a lock on the root. A locked buffer
192 * is returned, with a reference held.
194 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
196 struct extent_buffer *eb;
199 eb = btrfs_root_node(root);
200 btrfs_tree_read_lock(eb);
201 if (eb == root->node)
203 btrfs_tree_read_unlock(eb);
204 free_extent_buffer(eb);
/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list. transaction.c walks this list to make sure they
 * get properly updated on disk.
213 static void add_root_to_dirty_list(struct btrfs_root *root)
215 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
216 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
219 spin_lock(&root->fs_info->trans_lock);
220 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
221 /* Want the extent tree to be the last on the list */
222 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
223 list_move_tail(&root->dirty_list,
224 &root->fs_info->dirty_cowonly_roots);
226 list_move(&root->dirty_list,
227 &root->fs_info->dirty_cowonly_roots);
229 spin_unlock(&root->fs_info->trans_lock);
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
237 int btrfs_copy_root(struct btrfs_trans_handle *trans,
238 struct btrfs_root *root,
239 struct extent_buffer *buf,
240 struct extent_buffer **cow_ret, u64 new_root_objectid)
242 struct extent_buffer *cow;
245 struct btrfs_disk_key disk_key;
247 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
248 trans->transid != root->fs_info->running_transaction->transid);
249 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
250 trans->transid != root->last_trans);
252 level = btrfs_header_level(buf);
254 btrfs_item_key(buf, &disk_key, 0);
256 btrfs_node_key(buf, &disk_key, 0);
258 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
259 &disk_key, level, buf->start, 0);
263 copy_extent_buffer(cow, buf, 0, 0, cow->len);
264 btrfs_set_header_bytenr(cow, cow->start);
265 btrfs_set_header_generation(cow, trans->transid);
266 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
267 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
268 BTRFS_HEADER_FLAG_RELOC);
269 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
270 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
272 btrfs_set_header_owner(cow, new_root_objectid);
274 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
277 WARN_ON(btrfs_header_generation(buf) > trans->transid);
278 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
279 ret = btrfs_inc_ref(trans, root, cow, 1);
281 ret = btrfs_inc_ref(trans, root, cow, 0);
286 btrfs_mark_buffer_dirty(cow);
295 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
296 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
298 MOD_LOG_ROOT_REPLACE,
301 struct tree_mod_move {
306 struct tree_mod_root {
311 struct tree_mod_elem {
317 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
320 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
323 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
324 struct btrfs_disk_key key;
327 /* this is used for op == MOD_LOG_MOVE_KEYS */
328 struct tree_mod_move move;
330 /* this is used for op == MOD_LOG_ROOT_REPLACE */
331 struct tree_mod_root old_root;
334 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
336 read_lock(&fs_info->tree_mod_log_lock);
339 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
341 read_unlock(&fs_info->tree_mod_log_lock);
344 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
346 write_lock(&fs_info->tree_mod_log_lock);
349 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
351 write_unlock(&fs_info->tree_mod_log_lock);
355 * Pull a new tree mod seq number for our operation.
357 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
359 return atomic64_inc_return(&fs_info->tree_mod_seq);
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure elem->seq is set to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
370 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
371 struct seq_list *elem)
373 tree_mod_log_write_lock(fs_info);
374 spin_lock(&fs_info->tree_mod_seq_lock);
376 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
377 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
379 spin_unlock(&fs_info->tree_mod_seq_lock);
380 tree_mod_log_write_unlock(fs_info);
385 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
386 struct seq_list *elem)
388 struct rb_root *tm_root;
389 struct rb_node *node;
390 struct rb_node *next;
391 struct seq_list *cur_elem;
392 struct tree_mod_elem *tm;
393 u64 min_seq = (u64)-1;
394 u64 seq_putting = elem->seq;
399 spin_lock(&fs_info->tree_mod_seq_lock);
400 list_del(&elem->list);
403 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
404 if (cur_elem->seq < min_seq) {
405 if (seq_putting > cur_elem->seq) {
407 * blocker with lower sequence number exists, we
408 * cannot remove anything from the log
410 spin_unlock(&fs_info->tree_mod_seq_lock);
413 min_seq = cur_elem->seq;
416 spin_unlock(&fs_info->tree_mod_seq_lock);
419 * anything that's lower than the lowest existing (read: blocked)
420 * sequence number can be removed from the tree.
422 tree_mod_log_write_lock(fs_info);
423 tm_root = &fs_info->tree_mod_log;
424 for (node = rb_first(tm_root); node; node = next) {
425 next = rb_next(node);
426 tm = container_of(node, struct tree_mod_elem, node);
427 if (tm->seq > min_seq)
429 rb_erase(node, tm_root);
432 tree_mod_log_write_unlock(fs_info);
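
/*
 * Example (illustrative sketch): how a reader typically blocks tree mod log
 * pruning while it works. SEQ_LIST_INIT is assumed to be the seq_list
 * initializer from ctree.h; the lookups done under the blocker are elided.
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... walk old tree versions, passing elem.seq as time_seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */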
436 * key order of the log:
437 * node/leaf start address -> sequence
439 * The 'start address' is the logical address of the *new* root node
440 * for root replace operations, or the logical address of the affected
441 * block for all other operations.
443 * Note: must be called with write lock (tree_mod_log_write_lock).
446 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
448 struct rb_root *tm_root;
449 struct rb_node **new;
450 struct rb_node *parent = NULL;
451 struct tree_mod_elem *cur;
455 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
457 tm_root = &fs_info->tree_mod_log;
458 new = &tm_root->rb_node;
460 cur = container_of(*new, struct tree_mod_elem, node);
462 if (cur->logical < tm->logical)
463 new = &((*new)->rb_left);
464 else if (cur->logical > tm->logical)
465 new = &((*new)->rb_right);
466 else if (cur->seq < tm->seq)
467 new = &((*new)->rb_left);
468 else if (cur->seq > tm->seq)
469 new = &((*new)->rb_right);
474 rb_link_node(&tm->node, parent, new);
475 rb_insert_color(&tm->node, tm_root);
480 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
481 * returns zero with the tree_mod_log_lock acquired. The caller must hold
482 * this until all tree mod log insertions are recorded in the rb tree and then
483 * call tree_mod_log_write_unlock() to release.
485 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
486 struct extent_buffer *eb) {
488 if (list_empty(&(fs_info)->tree_mod_seq_list))
490 if (eb && btrfs_header_level(eb) == 0)
493 tree_mod_log_write_lock(fs_info);
494 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
495 tree_mod_log_write_unlock(fs_info);
502 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
503 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
504 struct extent_buffer *eb)
507 if (list_empty(&(fs_info)->tree_mod_seq_list))
509 if (eb && btrfs_header_level(eb) == 0)
515 static struct tree_mod_elem *
516 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
517 enum mod_log_op op, gfp_t flags)
519 struct tree_mod_elem *tm;
521 tm = kzalloc(sizeof(*tm), flags);
525 tm->logical = eb->start;
526 if (op != MOD_LOG_KEY_ADD) {
527 btrfs_node_key(eb, &tm->key, slot);
528 tm->blockptr = btrfs_node_blockptr(eb, slot);
532 tm->generation = btrfs_node_ptr_generation(eb, slot);
533 RB_CLEAR_NODE(&tm->node);
539 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
540 struct extent_buffer *eb, int slot,
541 enum mod_log_op op, gfp_t flags)
543 struct tree_mod_elem *tm;
546 if (!tree_mod_need_log(fs_info, eb))
549 tm = alloc_tree_mod_elem(eb, slot, op, flags);
553 if (tree_mod_dont_log(fs_info, eb)) {
558 ret = __tree_mod_log_insert(fs_info, tm);
559 tree_mod_log_write_unlock(fs_info);
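
/*
 * Note on the pattern used by the insert helpers above and below: the cheap,
 * lockless tree_mod_need_log() check is done first, the tree_mod_elem(s) are
 * allocated, and only then is tree_mod_dont_log() called, which rechecks under
 * tree_mod_log_write_lock(); if logging is still required, the elements are
 * inserted and the lock is dropped with tree_mod_log_write_unlock().
 */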
567 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
568 struct extent_buffer *eb, int dst_slot, int src_slot,
569 int nr_items, gfp_t flags)
571 struct tree_mod_elem *tm = NULL;
572 struct tree_mod_elem **tm_list = NULL;
577 if (!tree_mod_need_log(fs_info, eb))
580 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
584 tm = kzalloc(sizeof(*tm), flags);
590 tm->logical = eb->start;
592 tm->move.dst_slot = dst_slot;
593 tm->move.nr_items = nr_items;
594 tm->op = MOD_LOG_MOVE_KEYS;
596 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
597 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
598 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
605 if (tree_mod_dont_log(fs_info, eb))
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
614 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
615 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
620 ret = __tree_mod_log_insert(fs_info, tm);
623 tree_mod_log_write_unlock(fs_info);
628 for (i = 0; i < nr_items; i++) {
629 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
630 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
634 tree_mod_log_write_unlock(fs_info);
642 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
643 struct tree_mod_elem **tm_list,
649 for (i = nritems - 1; i >= 0; i--) {
650 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
652 for (j = nritems - 1; j > i; j--)
653 rb_erase(&tm_list[j]->node,
654 &fs_info->tree_mod_log);
663 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
664 struct extent_buffer *old_root,
665 struct extent_buffer *new_root, gfp_t flags,
668 struct tree_mod_elem *tm = NULL;
669 struct tree_mod_elem **tm_list = NULL;
674 if (!tree_mod_need_log(fs_info, NULL))
677 if (log_removal && btrfs_header_level(old_root) > 0) {
678 nritems = btrfs_header_nritems(old_root);
679 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
685 for (i = 0; i < nritems; i++) {
686 tm_list[i] = alloc_tree_mod_elem(old_root, i,
687 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
695 tm = kzalloc(sizeof(*tm), flags);
701 tm->logical = new_root->start;
702 tm->old_root.logical = old_root->start;
703 tm->old_root.level = btrfs_header_level(old_root);
704 tm->generation = btrfs_header_generation(old_root);
705 tm->op = MOD_LOG_ROOT_REPLACE;
707 if (tree_mod_dont_log(fs_info, NULL))
711 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
713 ret = __tree_mod_log_insert(fs_info, tm);
715 tree_mod_log_write_unlock(fs_info);
724 for (i = 0; i < nritems; i++)
733 static struct tree_mod_elem *
734 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
737 struct rb_root *tm_root;
738 struct rb_node *node;
739 struct tree_mod_elem *cur = NULL;
740 struct tree_mod_elem *found = NULL;
742 tree_mod_log_read_lock(fs_info);
743 tm_root = &fs_info->tree_mod_log;
744 node = tm_root->rb_node;
746 cur = container_of(node, struct tree_mod_elem, node);
747 if (cur->logical < start) {
748 node = node->rb_left;
749 } else if (cur->logical > start) {
750 node = node->rb_right;
751 } else if (cur->seq < min_seq) {
752 node = node->rb_left;
753 } else if (!smallest) {
754 /* we want the node with the highest seq */
756 BUG_ON(found->seq > cur->seq);
758 node = node->rb_left;
759 } else if (cur->seq > min_seq) {
760 /* we want the node with the smallest seq */
762 BUG_ON(found->seq < cur->seq);
764 node = node->rb_right;
770 tree_mod_log_read_unlock(fs_info);
776 * this returns the element from the log with the smallest time sequence
777 * value that's in the log (the oldest log item). any element with a time
778 * sequence lower than min_seq will be ignored.
780 static struct tree_mod_elem *
781 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
784 return __tree_mod_log_search(fs_info, start, min_seq, 1);
788 * this returns the element from the log with the largest time sequence
789 * value that's in the log (the most recent log item). any element with
790 * a time sequence lower than min_seq will be ignored.
792 static struct tree_mod_elem *
793 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
795 return __tree_mod_log_search(fs_info, start, min_seq, 0);
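
/*
 * Worked example (illustrative): if the log holds elements with seq 5, 7 and 9
 * for a given block and min_seq is 6, the seq-5 element is ignored;
 * tree_mod_log_search_oldest() returns the seq-7 element, while
 * tree_mod_log_search() returns the seq-9 element.
 */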
799 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
800 struct extent_buffer *src, unsigned long dst_offset,
801 unsigned long src_offset, int nr_items)
804 struct tree_mod_elem **tm_list = NULL;
805 struct tree_mod_elem **tm_list_add, **tm_list_rem;
809 if (!tree_mod_need_log(fs_info, NULL))
812 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
815 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
820 tm_list_add = tm_list;
821 tm_list_rem = tm_list + nr_items;
822 for (i = 0; i < nr_items; i++) {
823 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
824 MOD_LOG_KEY_REMOVE, GFP_NOFS);
825 if (!tm_list_rem[i]) {
830 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
831 MOD_LOG_KEY_ADD, GFP_NOFS);
832 if (!tm_list_add[i]) {
838 if (tree_mod_dont_log(fs_info, NULL))
842 for (i = 0; i < nr_items; i++) {
843 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
846 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
851 tree_mod_log_write_unlock(fs_info);
857 for (i = 0; i < nr_items * 2; i++) {
858 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
859 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
863 tree_mod_log_write_unlock(fs_info);
870 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
871 int dst_offset, int src_offset, int nr_items)
874 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
880 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
881 struct extent_buffer *eb, int slot, int atomic)
885 ret = tree_mod_log_insert_key(fs_info, eb, slot,
887 atomic ? GFP_ATOMIC : GFP_NOFS);
892 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
894 struct tree_mod_elem **tm_list = NULL;
899 if (btrfs_header_level(eb) == 0)
902 if (!tree_mod_need_log(fs_info, NULL))
905 nritems = btrfs_header_nritems(eb);
906 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
910 for (i = 0; i < nritems; i++) {
911 tm_list[i] = alloc_tree_mod_elem(eb, i,
912 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
919 if (tree_mod_dont_log(fs_info, eb))
922 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
923 tree_mod_log_write_unlock(fs_info);
931 for (i = 0; i < nritems; i++)
939 tree_mod_log_set_root_pointer(struct btrfs_root *root,
940 struct extent_buffer *new_root_node,
944 ret = tree_mod_log_insert_root(root->fs_info, root->node,
945 new_root_node, GFP_NOFS, log_removal);
950 * check if the tree block can be shared by multiple trees
952 int btrfs_block_can_be_shared(struct btrfs_root *root,
953 struct extent_buffer *buf)
956 * Tree blocks not in reference counted trees and tree roots
957 * are never shared. If a block was allocated after the last
958 * snapshot and the block was not allocated by tree relocation,
959 * we know the block is not shared.
961 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
962 buf != root->node && buf != root->commit_root &&
963 (btrfs_header_generation(buf) <=
964 btrfs_root_last_snapshot(&root->root_item) ||
965 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
967 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
968 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
969 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
975 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
976 struct btrfs_root *root,
977 struct extent_buffer *buf,
978 struct extent_buffer *cow,
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
1004 if (btrfs_block_can_be_shared(root, buf)) {
1005 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1006 btrfs_header_level(buf), 1,
1012 btrfs_handle_fs_error(root->fs_info, ret, NULL);
1017 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1018 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1019 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1024 owner = btrfs_header_owner(buf);
1025 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1026 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1029 if ((owner == root->root_key.objectid ||
1030 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1031 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1032 ret = btrfs_inc_ref(trans, root, buf, 1);
1033 BUG_ON(ret); /* -ENOMEM */
1035 if (root->root_key.objectid ==
1036 BTRFS_TREE_RELOC_OBJECTID) {
1037 ret = btrfs_dec_ref(trans, root, buf, 0);
1038 BUG_ON(ret); /* -ENOMEM */
1039 ret = btrfs_inc_ref(trans, root, cow, 1);
1040 BUG_ON(ret); /* -ENOMEM */
1042 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1045 if (root->root_key.objectid ==
1046 BTRFS_TREE_RELOC_OBJECTID)
1047 ret = btrfs_inc_ref(trans, root, cow, 1);
1049 ret = btrfs_inc_ref(trans, root, cow, 0);
1050 BUG_ON(ret); /* -ENOMEM */
1052 if (new_flags != 0) {
1053 int level = btrfs_header_level(buf);
1055 ret = btrfs_set_disk_extent_flags(trans, root,
1058 new_flags, level, 0);
1063 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1064 if (root->root_key.objectid ==
1065 BTRFS_TREE_RELOC_OBJECTID)
1066 ret = btrfs_inc_ref(trans, root, cow, 1);
1068 ret = btrfs_inc_ref(trans, root, cow, 0);
1069 BUG_ON(ret); /* -ENOMEM */
1070 ret = btrfs_dec_ref(trans, root, buf, 1);
1071 BUG_ON(ret); /* -ENOMEM */
1073 clean_tree_block(trans, root->fs_info, buf);
1080 * does the dirty work in cow of a single block. The parent block (if
1081 * supplied) is updated to point to the new cow copy. The new buffer is marked
 * dirty and returned locked. If you modify the block it needs to be marked
 * dirty again.
1085 * search_start -- an allocation hint for the new block
1087 * empty_size -- a hint that you plan on doing more cow. This is the size in
1088 * bytes the allocator should try to find free next to the block it returns.
1089 * This is just a hint and may be ignored by the allocator.
1091 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1092 struct btrfs_root *root,
1093 struct extent_buffer *buf,
1094 struct extent_buffer *parent, int parent_slot,
1095 struct extent_buffer **cow_ret,
1096 u64 search_start, u64 empty_size)
1098 struct btrfs_disk_key disk_key;
1099 struct extent_buffer *cow;
1102 int unlock_orig = 0;
1105 if (*cow_ret == buf)
1108 btrfs_assert_tree_locked(buf);
1110 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1111 trans->transid != root->fs_info->running_transaction->transid);
1112 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1113 trans->transid != root->last_trans);
1115 level = btrfs_header_level(buf);
1118 btrfs_item_key(buf, &disk_key, 0);
1120 btrfs_node_key(buf, &disk_key, 0);
1122 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1124 parent_start = parent->start;
1130 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1131 root->root_key.objectid, &disk_key, level,
1132 search_start, empty_size);
1134 return PTR_ERR(cow);
1136 /* cow is set to blocking by btrfs_init_new_buffer */
1138 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1139 btrfs_set_header_bytenr(cow, cow->start);
1140 btrfs_set_header_generation(cow, trans->transid);
1141 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1142 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1143 BTRFS_HEADER_FLAG_RELOC);
1144 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1145 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1147 btrfs_set_header_owner(cow, root->root_key.objectid);
1149 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1152 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1154 btrfs_abort_transaction(trans, ret);
1158 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1159 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1161 btrfs_abort_transaction(trans, ret);
1166 if (buf == root->node) {
1167 WARN_ON(parent && parent != buf);
1168 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1169 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1170 parent_start = buf->start;
1174 extent_buffer_get(cow);
1175 tree_mod_log_set_root_pointer(root, cow, 1);
1176 rcu_assign_pointer(root->node, cow);
1178 btrfs_free_tree_block(trans, root, buf, parent_start,
1180 free_extent_buffer(buf);
1181 add_root_to_dirty_list(root);
1183 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1184 parent_start = parent->start;
1188 WARN_ON(trans->transid != btrfs_header_generation(parent));
1189 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1190 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1191 btrfs_set_node_blockptr(parent, parent_slot,
1193 btrfs_set_node_ptr_generation(parent, parent_slot,
1195 btrfs_mark_buffer_dirty(parent);
1197 ret = tree_mod_log_free_eb(root->fs_info, buf);
1199 btrfs_abort_transaction(trans, ret);
1203 btrfs_free_tree_block(trans, root, buf, parent_start,
1207 btrfs_tree_unlock(buf);
1208 free_extent_buffer_stale(buf);
1209 btrfs_mark_buffer_dirty(cow);
1215 * returns the logical address of the oldest predecessor of the given root.
1216 * entries older than time_seq are ignored.
1218 static struct tree_mod_elem *
1219 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1220 struct extent_buffer *eb_root, u64 time_seq)
1222 struct tree_mod_elem *tm;
1223 struct tree_mod_elem *found = NULL;
1224 u64 root_logical = eb_root->start;
1231 * the very last operation that's logged for a root is the
1232 * replacement operation (if it is replaced at all). this has
1233 * the logical address of the *new* root, making it the very
1234 * first operation that's logged for this root.
1237 tm = tree_mod_log_search_oldest(fs_info, root_logical,
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
1250 * if there's an operation that's not a root replacement, we
1251 * found the oldest version of our root. normally, we'll find a
1252 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1254 if (tm->op != MOD_LOG_ROOT_REPLACE)
1258 root_logical = tm->old_root.logical;
1262 /* if there's no old root to return, return what we found instead */
1270 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
1275 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1276 u64 time_seq, struct tree_mod_elem *first_tm)
1279 struct rb_node *next;
1280 struct tree_mod_elem *tm = first_tm;
1281 unsigned long o_dst;
1282 unsigned long o_src;
1283 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1285 n = btrfs_header_nritems(eb);
1286 tree_mod_log_read_lock(fs_info);
1287 while (tm && tm->seq >= time_seq) {
1289 * all the operations are recorded with the operator used for
1290 * the modification. as we're going backwards, we do the
1291 * opposite of each operation here.
1294 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1295 BUG_ON(tm->slot < n);
1297 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1298 case MOD_LOG_KEY_REMOVE:
1299 btrfs_set_node_key(eb, &tm->key, tm->slot);
1300 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1301 btrfs_set_node_ptr_generation(eb, tm->slot,
1305 case MOD_LOG_KEY_REPLACE:
1306 BUG_ON(tm->slot >= n);
1307 btrfs_set_node_key(eb, &tm->key, tm->slot);
1308 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1309 btrfs_set_node_ptr_generation(eb, tm->slot,
1312 case MOD_LOG_KEY_ADD:
1313 /* if a move operation is needed it's in the log */
1316 case MOD_LOG_MOVE_KEYS:
1317 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1318 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1319 memmove_extent_buffer(eb, o_dst, o_src,
1320 tm->move.nr_items * p_size);
1322 case MOD_LOG_ROOT_REPLACE:
1324 * this operation is special. for roots, this must be
1325 * handled explicitly before rewinding.
1326 * for non-roots, this operation may exist if the node
1327 * was a root: root A -> child B; then A gets empty and
1328 * B is promoted to the new root. in the mod log, we'll
1329 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
1334 next = rb_next(&tm->node);
1337 tm = container_of(next, struct tree_mod_elem, node);
1338 if (tm->logical != first_tm->logical)
1341 tree_mod_log_read_unlock(fs_info);
1342 btrfs_set_header_nritems(eb, n);
1346 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1347 * is returned. If rewind operations happen, a fresh buffer is returned. The
1348 * returned buffer is always read-locked. If the returned buffer is not the
1349 * input buffer, the lock on the input buffer is released and the input buffer
1350 * is freed (its refcount is decremented).
1352 static struct extent_buffer *
1353 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1354 struct extent_buffer *eb, u64 time_seq)
1356 struct extent_buffer *eb_rewin;
1357 struct tree_mod_elem *tm;
1362 if (btrfs_header_level(eb) == 0)
1365 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1369 btrfs_set_path_blocking(path);
1370 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1372 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1373 BUG_ON(tm->slot != 0);
1374 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
1377 btrfs_tree_read_unlock_blocking(eb);
1378 free_extent_buffer(eb);
1381 btrfs_set_header_bytenr(eb_rewin, eb->start);
1382 btrfs_set_header_backref_rev(eb_rewin,
1383 btrfs_header_backref_rev(eb));
1384 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1385 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1387 eb_rewin = btrfs_clone_extent_buffer(eb);
1389 btrfs_tree_read_unlock_blocking(eb);
1390 free_extent_buffer(eb);
1395 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1396 btrfs_tree_read_unlock_blocking(eb);
1397 free_extent_buffer(eb);
1399 extent_buffer_get(eb_rewin);
1400 btrfs_tree_read_lock(eb_rewin);
1401 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1402 WARN_ON(btrfs_header_nritems(eb_rewin) >
1403 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1409 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1410 * value. If there are no changes, the current root->root_node is returned. If
1411 * anything changed in between, there's a fresh buffer allocated on which the
1412 * rewind operations are done. In any case, the returned buffer is read locked.
1413 * Returns NULL on error (with no locks held).
1415 static inline struct extent_buffer *
1416 get_old_root(struct btrfs_root *root, u64 time_seq)
1418 struct tree_mod_elem *tm;
1419 struct extent_buffer *eb = NULL;
1420 struct extent_buffer *eb_root;
1421 struct extent_buffer *old;
1422 struct tree_mod_root *old_root = NULL;
1423 u64 old_generation = 0;
1426 eb_root = btrfs_read_lock_root_node(root);
1427 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1431 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1432 old_root = &tm->old_root;
1433 old_generation = tm->generation;
1434 logical = old_root->logical;
1436 logical = eb_root->start;
1439 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1440 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1441 btrfs_tree_read_unlock(eb_root);
1442 free_extent_buffer(eb_root);
1443 old = read_tree_block(root, logical, 0);
1444 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1446 free_extent_buffer(old);
1447 btrfs_warn(root->fs_info,
1448 "failed to read tree block %llu from get_old_root", logical);
1450 eb = btrfs_clone_extent_buffer(old);
1451 free_extent_buffer(old);
1453 } else if (old_root) {
1454 btrfs_tree_read_unlock(eb_root);
1455 free_extent_buffer(eb_root);
1456 eb = alloc_dummy_extent_buffer(root->fs_info, logical,
1459 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1460 eb = btrfs_clone_extent_buffer(eb_root);
1461 btrfs_tree_read_unlock_blocking(eb_root);
1462 free_extent_buffer(eb_root);
1467 extent_buffer_get(eb);
1468 btrfs_tree_read_lock(eb);
1470 btrfs_set_header_bytenr(eb, eb->start);
1471 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1472 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1473 btrfs_set_header_level(eb, old_root->level);
1474 btrfs_set_header_generation(eb, old_generation);
1477 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1479 WARN_ON(btrfs_header_level(eb) != 0);
1480 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1485 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1487 struct tree_mod_elem *tm;
1489 struct extent_buffer *eb_root = btrfs_root_node(root);
1491 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1492 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1493 level = tm->old_root.level;
1495 level = btrfs_header_level(eb_root);
1497 free_extent_buffer(eb_root);
1502 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1503 struct btrfs_root *root,
1504 struct extent_buffer *buf)
1506 if (btrfs_is_testing(root->fs_info))
1509 /* ensure we can see the force_cow */
1513 * We do not need to cow a block if
1514 * 1) this block is not created or changed in this transaction;
1515 * 2) this block does not belong to TREE_RELOC tree;
1516 * 3) the root is not forced COW.
1518 * What is forced COW:
	 * when we create a snapshot during committing the transaction,
	 * after we've finished copying the src root, we must COW the shared
	 * block to ensure metadata consistency.
1523 if (btrfs_header_generation(buf) == trans->transid &&
1524 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1525 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1526 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1527 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1533 * cows a single block, see __btrfs_cow_block for the real work.
1534 * This version of it has extra checks so that a block isn't COWed more than
1535 * once per transaction, as long as it hasn't been written yet
1537 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1538 struct btrfs_root *root, struct extent_buffer *buf,
1539 struct extent_buffer *parent, int parent_slot,
1540 struct extent_buffer **cow_ret)
1545 if (trans->transaction != root->fs_info->running_transaction)
1546 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1548 root->fs_info->running_transaction->transid);
1550 if (trans->transid != root->fs_info->generation)
1551 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1552 trans->transid, root->fs_info->generation);
1554 if (!should_cow_block(trans, root, buf)) {
1555 trans->dirty = true;
1560 search_start = buf->start & ~((u64)SZ_1G - 1);
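	/*
	 * Worked example (illustrative): with SZ_1G == 0x40000000, a buffer at
	 * logical 0xC0C00000 (3 GiB + 12 MiB) yields search_start 0xC0000000,
	 * i.e. the COW allocation hint is the containing 1 GiB boundary.
	 */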
1563 btrfs_set_lock_blocking(parent);
1564 btrfs_set_lock_blocking(buf);
1566 ret = __btrfs_cow_block(trans, root, buf, parent,
1567 parent_slot, cow_ret, search_start, 0);
1569 trace_btrfs_cow_block(root, buf, *cow_ret);
1575 * helper function for defrag to decide if two blocks pointed to by a
1576 * node are actually close by
1578 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1580 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1582 if (blocknr > other && blocknr - (other + blocksize) < 32768)
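
/*
 * Worked example (illustrative) for close_blocks(): with a 16KiB blocksize,
 * blocks at 0x10000000 and 0x10005000 are "close" (the gap past the end of
 * the first block is 0x1000 bytes, well under the 32KiB threshold), while
 * blocks a megabyte apart are not. The same threshold applies in either order.
 */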
1588 * compare two keys in a memcmp fashion
1590 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1592 struct btrfs_key k1;
1594 btrfs_disk_key_to_cpu(&k1, disk);
1596 return btrfs_comp_cpu_keys(&k1, k2);
1600 * same as comp_keys only with two btrfs_key's
1602 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1604 if (k1->objectid > k2->objectid)
1606 if (k1->objectid < k2->objectid)
1608 if (k1->type > k2->type)
1610 if (k1->type < k2->type)
1612 if (k1->offset > k2->offset)
1614 if (k1->offset < k2->offset)
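
/*
 * Worked example (illustrative): keys compare first by objectid, then type,
 * then offset, so {.objectid = 256, .type = 1, .offset = 100} sorts before
 * {.objectid = 256, .type = 2, .offset = 0} regardless of offset, and
 * btrfs_comp_cpu_keys() returns -1 for that pair.
 */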
1620 * this is used by the defrag code to go through all the
1621 * leaves pointed to by a node and reallocate them so that
1622 * disk order is close to key order
1624 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1625 struct btrfs_root *root, struct extent_buffer *parent,
1626 int start_slot, u64 *last_ret,
1627 struct btrfs_key *progress)
1629 struct extent_buffer *cur;
1632 u64 search_start = *last_ret;
1642 int progress_passed = 0;
1643 struct btrfs_disk_key disk_key;
1645 parent_level = btrfs_header_level(parent);
1647 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1648 WARN_ON(trans->transid != root->fs_info->generation);
1650 parent_nritems = btrfs_header_nritems(parent);
1651 blocksize = root->nodesize;
1652 end_slot = parent_nritems - 1;
1654 if (parent_nritems <= 1)
1657 btrfs_set_lock_blocking(parent);
1659 for (i = start_slot; i <= end_slot; i++) {
1662 btrfs_node_key(parent, &disk_key, i);
1663 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1666 progress_passed = 1;
1667 blocknr = btrfs_node_blockptr(parent, i);
1668 gen = btrfs_node_ptr_generation(parent, i);
1669 if (last_block == 0)
1670 last_block = blocknr;
1673 other = btrfs_node_blockptr(parent, i - 1);
1674 close = close_blocks(blocknr, other, blocksize);
1676 if (!close && i < end_slot) {
1677 other = btrfs_node_blockptr(parent, i + 1);
1678 close = close_blocks(blocknr, other, blocksize);
1681 last_block = blocknr;
1685 cur = btrfs_find_tree_block(root->fs_info, blocknr);
1687 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1690 if (!cur || !uptodate) {
1692 cur = read_tree_block(root, blocknr, gen);
1694 return PTR_ERR(cur);
1695 } else if (!extent_buffer_uptodate(cur)) {
1696 free_extent_buffer(cur);
1699 } else if (!uptodate) {
1700 err = btrfs_read_buffer(cur, gen);
1702 free_extent_buffer(cur);
1707 if (search_start == 0)
1708 search_start = last_block;
1710 btrfs_tree_lock(cur);
1711 btrfs_set_lock_blocking(cur);
1712 err = __btrfs_cow_block(trans, root, cur, parent, i,
1715 (end_slot - i) * blocksize));
1717 btrfs_tree_unlock(cur);
1718 free_extent_buffer(cur);
1721 search_start = cur->start;
1722 last_block = cur->start;
1723 *last_ret = search_start;
1724 btrfs_tree_unlock(cur);
1725 free_extent_buffer(cur);
1731 * The leaf data grows from end-to-front in the node.
1732 * this returns the address of the start of the last item,
1733 * which is the stop of the leaf data stack
1735 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1736 struct extent_buffer *leaf)
1738 u32 nr = btrfs_header_nritems(leaf);
1740 return BTRFS_LEAF_DATA_SIZE(root);
1741 return btrfs_item_offset_nr(leaf, nr - 1);
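
/*
 * Illustrative note: in a leaf the item headers grow forward from the start of
 * the block while the item data grows backward from the end, so the offset of
 * the last item's data is where the leaf data stack currently stops. An empty
 * leaf therefore reports BTRFS_LEAF_DATA_SIZE(root) as its data end.
 */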
1746 * search for key in the extent_buffer. The items start at offset p,
1747 * and they are item_size apart. There are 'max' items in p.
1749 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
1755 static noinline int generic_bin_search(struct extent_buffer *eb,
1757 int item_size, struct btrfs_key *key,
1764 struct btrfs_disk_key *tmp = NULL;
1765 struct btrfs_disk_key unaligned;
1766 unsigned long offset;
1768 unsigned long map_start = 0;
1769 unsigned long map_len = 0;
1773 btrfs_err(eb->fs_info,
1774 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1775 __func__, low, high, eb->start,
1776 btrfs_header_owner(eb), btrfs_header_level(eb));
1780 while (low < high) {
1781 mid = (low + high) / 2;
1782 offset = p + mid * item_size;
1784 if (!kaddr || offset < map_start ||
1785 (offset + sizeof(struct btrfs_disk_key)) >
1786 map_start + map_len) {
1788 err = map_private_extent_buffer(eb, offset,
1789 sizeof(struct btrfs_disk_key),
1790 &kaddr, &map_start, &map_len);
1793 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1795 } else if (err == 1) {
1796 read_extent_buffer(eb, &unaligned,
1797 offset, sizeof(unaligned));
1804 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1807 ret = comp_keys(tmp, key);
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
1826 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1827 int level, int *slot)
1830 return generic_bin_search(eb,
1831 offsetof(struct btrfs_leaf, items),
1832 sizeof(struct btrfs_item),
1833 key, btrfs_header_nritems(eb),
1836 return generic_bin_search(eb,
1837 offsetof(struct btrfs_node, ptrs),
1838 sizeof(struct btrfs_key_ptr),
1839 key, btrfs_header_nritems(eb),
1843 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1844 int level, int *slot)
1846 return bin_search(eb, key, level, slot);
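
/*
 * Example (illustrative sketch): a caller of btrfs_bin_search(); ret == 0
 * means the key was found at *slot, otherwise *slot is the position where the
 * key would be inserted (and may equal the item count).
 *
 *	int slot;
 *	int ret = btrfs_bin_search(eb, &key, btrfs_header_level(eb), &slot);
 *	if (ret == 0)
 *		... key exists at slot ...
 */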
1849 static void root_add_used(struct btrfs_root *root, u32 size)
1851 spin_lock(&root->accounting_lock);
1852 btrfs_set_root_used(&root->root_item,
1853 btrfs_root_used(&root->root_item) + size);
1854 spin_unlock(&root->accounting_lock);
1857 static void root_sub_used(struct btrfs_root *root, u32 size)
1859 spin_lock(&root->accounting_lock);
1860 btrfs_set_root_used(&root->root_item,
1861 btrfs_root_used(&root->root_item) - size);
1862 spin_unlock(&root->accounting_lock);
1865 /* given a node and slot number, this reads the blocks it points to. The
1866 * extent buffer is returned with a reference taken (but unlocked).
1868 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1869 struct extent_buffer *parent, int slot)
1871 int level = btrfs_header_level(parent);
1872 struct extent_buffer *eb;
1874 if (slot < 0 || slot >= btrfs_header_nritems(parent))
1875 return ERR_PTR(-ENOENT);
1879 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1880 btrfs_node_ptr_generation(parent, slot));
1881 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1882 free_extent_buffer(eb);
1890 * node level balancing, used to make sure nodes are in proper order for
1891 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
1894 static noinline int balance_level(struct btrfs_trans_handle *trans,
1895 struct btrfs_root *root,
1896 struct btrfs_path *path, int level)
1898 struct extent_buffer *right = NULL;
1899 struct extent_buffer *mid;
1900 struct extent_buffer *left = NULL;
1901 struct extent_buffer *parent = NULL;
1905 int orig_slot = path->slots[level];
1911 mid = path->nodes[level];
1913 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1914 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1915 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1917 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1919 if (level < BTRFS_MAX_LEVEL - 1) {
1920 parent = path->nodes[level + 1];
1921 pslot = path->slots[level + 1];
1925 * deal with the case where there is only one pointer in the root
1926 * by promoting the node below to a root
1929 struct extent_buffer *child;
1931 if (btrfs_header_nritems(mid) != 1)
1934 /* promote the child to a root */
1935 child = read_node_slot(root, mid, 0);
1936 if (IS_ERR(child)) {
1937 ret = PTR_ERR(child);
1938 btrfs_handle_fs_error(root->fs_info, ret, NULL);
1942 btrfs_tree_lock(child);
1943 btrfs_set_lock_blocking(child);
1944 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1946 btrfs_tree_unlock(child);
1947 free_extent_buffer(child);
1951 tree_mod_log_set_root_pointer(root, child, 1);
1952 rcu_assign_pointer(root->node, child);
1954 add_root_to_dirty_list(root);
1955 btrfs_tree_unlock(child);
1957 path->locks[level] = 0;
1958 path->nodes[level] = NULL;
1959 clean_tree_block(trans, root->fs_info, mid);
1960 btrfs_tree_unlock(mid);
1961 /* once for the path */
1962 free_extent_buffer(mid);
1964 root_sub_used(root, mid->len);
1965 btrfs_free_tree_block(trans, root, mid, 0, 1);
1966 /* once for the root ptr */
1967 free_extent_buffer_stale(mid);
1970 if (btrfs_header_nritems(mid) >
1971 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1974 left = read_node_slot(root, parent, pslot - 1);
1979 btrfs_tree_lock(left);
1980 btrfs_set_lock_blocking(left);
1981 wret = btrfs_cow_block(trans, root, left,
1982 parent, pslot - 1, &left);
1989 right = read_node_slot(root, parent, pslot + 1);
1994 btrfs_tree_lock(right);
1995 btrfs_set_lock_blocking(right);
1996 wret = btrfs_cow_block(trans, root, right,
1997 parent, pslot + 1, &right);
2004 /* first, try to make some room in the middle buffer */
2006 orig_slot += btrfs_header_nritems(left);
2007 wret = push_node_left(trans, root, left, mid, 1);
2013 * then try to empty the right most buffer into the middle
2016 wret = push_node_left(trans, root, mid, right, 1);
2017 if (wret < 0 && wret != -ENOSPC)
2019 if (btrfs_header_nritems(right) == 0) {
2020 clean_tree_block(trans, root->fs_info, right);
2021 btrfs_tree_unlock(right);
2022 del_ptr(root, path, level + 1, pslot + 1);
2023 root_sub_used(root, right->len);
2024 btrfs_free_tree_block(trans, root, right, 0, 1);
2025 free_extent_buffer_stale(right);
2028 struct btrfs_disk_key right_key;
2029 btrfs_node_key(right, &right_key, 0);
2030 tree_mod_log_set_node_key(root->fs_info, parent,
2032 btrfs_set_node_key(parent, &right_key, pslot + 1);
2033 btrfs_mark_buffer_dirty(parent);
2036 if (btrfs_header_nritems(mid) == 1) {
2038 * we're not allowed to leave a node with one item in the
2039 * tree during a delete. A deletion from lower in the tree
2040 * could try to delete the only pointer in this node.
2041 * So, pull some keys from the left.
2042 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
2048 btrfs_handle_fs_error(root->fs_info, ret, NULL);
2051 wret = balance_node_right(trans, root, mid, left);
2057 wret = push_node_left(trans, root, left, mid, 1);
2063 if (btrfs_header_nritems(mid) == 0) {
2064 clean_tree_block(trans, root->fs_info, mid);
2065 btrfs_tree_unlock(mid);
2066 del_ptr(root, path, level + 1, pslot);
2067 root_sub_used(root, mid->len);
2068 btrfs_free_tree_block(trans, root, mid, 0, 1);
2069 free_extent_buffer_stale(mid);
2072 /* update the parent key to reflect our changes */
2073 struct btrfs_disk_key mid_key;
2074 btrfs_node_key(mid, &mid_key, 0);
2075 tree_mod_log_set_node_key(root->fs_info, parent,
2077 btrfs_set_node_key(parent, &mid_key, pslot);
2078 btrfs_mark_buffer_dirty(parent);
2081 /* update the path */
2083 if (btrfs_header_nritems(left) > orig_slot) {
2084 extent_buffer_get(left);
2085 /* left was locked after cow */
2086 path->nodes[level] = left;
2087 path->slots[level + 1] -= 1;
2088 path->slots[level] = orig_slot;
2090 btrfs_tree_unlock(mid);
2091 free_extent_buffer(mid);
2094 orig_slot -= btrfs_header_nritems(left);
2095 path->slots[level] = orig_slot;
2098 /* double check we haven't messed things up */
2100 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2104 btrfs_tree_unlock(right);
2105 free_extent_buffer(right);
2108 if (path->nodes[level] != left)
2109 btrfs_tree_unlock(left);
2110 free_extent_buffer(left);
2115 /* Node balancing for insertion. Here we only split or push nodes around
2116 * when they are completely full. This is also done top down, so we
2117 * have to be pessimistic.
2119 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2120 struct btrfs_root *root,
2121 struct btrfs_path *path, int level)
2123 struct extent_buffer *right = NULL;
2124 struct extent_buffer *mid;
2125 struct extent_buffer *left = NULL;
2126 struct extent_buffer *parent = NULL;
2130 int orig_slot = path->slots[level];
2135 mid = path->nodes[level];
2136 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2138 if (level < BTRFS_MAX_LEVEL - 1) {
2139 parent = path->nodes[level + 1];
2140 pslot = path->slots[level + 1];
2146 left = read_node_slot(root, parent, pslot - 1);
2150 /* first, try to make some room in the middle buffer */
2154 btrfs_tree_lock(left);
2155 btrfs_set_lock_blocking(left);
2157 left_nr = btrfs_header_nritems(left);
2158 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2161 ret = btrfs_cow_block(trans, root, left, parent,
2166 wret = push_node_left(trans, root,
2173 struct btrfs_disk_key disk_key;
2174 orig_slot += left_nr;
2175 btrfs_node_key(mid, &disk_key, 0);
2176 tree_mod_log_set_node_key(root->fs_info, parent,
2178 btrfs_set_node_key(parent, &disk_key, pslot);
2179 btrfs_mark_buffer_dirty(parent);
2180 if (btrfs_header_nritems(left) > orig_slot) {
2181 path->nodes[level] = left;
2182 path->slots[level + 1] -= 1;
2183 path->slots[level] = orig_slot;
2184 btrfs_tree_unlock(mid);
2185 free_extent_buffer(mid);
2188 btrfs_header_nritems(left);
2189 path->slots[level] = orig_slot;
2190 btrfs_tree_unlock(left);
2191 free_extent_buffer(left);
2195 btrfs_tree_unlock(left);
2196 free_extent_buffer(left);
2198 right = read_node_slot(root, parent, pslot + 1);
2203 * then try to empty the right most buffer into the middle
2208 btrfs_tree_lock(right);
2209 btrfs_set_lock_blocking(right);
2211 right_nr = btrfs_header_nritems(right);
2212 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2215 ret = btrfs_cow_block(trans, root, right,
2221 wret = balance_node_right(trans, root,
2228 struct btrfs_disk_key disk_key;
2230 btrfs_node_key(right, &disk_key, 0);
2231 tree_mod_log_set_node_key(root->fs_info, parent,
2233 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2234 btrfs_mark_buffer_dirty(parent);
2236 if (btrfs_header_nritems(mid) <= orig_slot) {
2237 path->nodes[level] = right;
2238 path->slots[level + 1] += 1;
2239 path->slots[level] = orig_slot -
2240 btrfs_header_nritems(mid);
2241 btrfs_tree_unlock(mid);
2242 free_extent_buffer(mid);
2244 btrfs_tree_unlock(right);
2245 free_extent_buffer(right);
2249 btrfs_tree_unlock(right);
2250 free_extent_buffer(right);
2256 * readahead one full node of leaves, finding things that are close
2257 * to the block in 'slot', and triggering ra on them.
2259 static void reada_for_search(struct btrfs_root *root,
2260 struct btrfs_path *path,
2261 int level, int slot, u64 objectid)
2263 struct extent_buffer *node;
2264 struct btrfs_disk_key disk_key;
2269 struct extent_buffer *eb;
2277 if (!path->nodes[level])
2280 node = path->nodes[level];
2282 search = btrfs_node_blockptr(node, slot);
2283 blocksize = root->nodesize;
2284 eb = btrfs_find_tree_block(root->fs_info, search);
2286 free_extent_buffer(eb);
2292 nritems = btrfs_header_nritems(node);
2296 if (path->reada == READA_BACK) {
2300 } else if (path->reada == READA_FORWARD) {
2305 if (path->reada == READA_BACK && objectid) {
2306 btrfs_node_key(node, &disk_key, nr);
2307 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2310 search = btrfs_node_blockptr(node, nr);
2311 if ((search <= target && target - search <= 65536) ||
2312 (search > target && search - target <= 65536)) {
2313 readahead_tree_block(root, search);
2317 if ((nread > 65536 || nscan > 32))
2322 static noinline void reada_for_balance(struct btrfs_root *root,
2323 struct btrfs_path *path, int level)
2327 struct extent_buffer *parent;
2328 struct extent_buffer *eb;
2333 parent = path->nodes[level + 1];
2337 nritems = btrfs_header_nritems(parent);
2338 slot = path->slots[level + 1];
2341 block1 = btrfs_node_blockptr(parent, slot - 1);
2342 gen = btrfs_node_ptr_generation(parent, slot - 1);
2343 eb = btrfs_find_tree_block(root->fs_info, block1);
2345 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here. That will loop
		 * forever
2349 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2351 free_extent_buffer(eb);
2353 if (slot + 1 < nritems) {
2354 block2 = btrfs_node_blockptr(parent, slot + 1);
2355 gen = btrfs_node_ptr_generation(parent, slot + 1);
2356 eb = btrfs_find_tree_block(root->fs_info, block2);
2357 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2359 free_extent_buffer(eb);
2363 readahead_tree_block(root, block1);
2365 readahead_tree_block(root, block2);
2370 * when we walk down the tree, it is usually safe to unlock the higher layers
2371 * in the tree. The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
2375 * callers might also have set path->keep_locks, which tells this code to keep
2376 * the lock if the path points to the last slot in the block. This is part of
2377 * walking through the tree, and selecting the next slot in the higher block.
2379 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2380 * if lowest_unlock is 1, level 0 won't be unlocked
2382 static noinline void unlock_up(struct btrfs_path *path, int level,
2383 int lowest_unlock, int min_write_lock_level,
2384 int *write_lock_level)
2387 int skip_level = level;
2389 struct extent_buffer *t;
2391 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2392 if (!path->nodes[i])
2394 if (!path->locks[i])
2396 if (!no_skips && path->slots[i] == 0) {
2400 if (!no_skips && path->keep_locks) {
2403 nritems = btrfs_header_nritems(t);
2404 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2409 if (skip_level < i && i >= lowest_unlock)
2413 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2414 btrfs_tree_unlock_rw(t, path->locks[i]);
2416 if (write_lock_level &&
2417 i > min_write_lock_level &&
2418 i <= *write_lock_level) {
2419 *write_lock_level = i - 1;
2426 * This releases any locks held in the path starting at level and
2427 * going all the way up to the root.
2429 * btrfs_search_slot will keep the lock held on higher nodes in a few
2430 * corner cases, such as COW of the block at slot zero in the node. This
2431 * ignores those rules, and it should only be called when there are no
2432 * more updates to be done higher up in the tree.
2434 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2438 if (path->keep_locks)
2441 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2442 if (!path->nodes[i])
2444 if (!path->locks[i])
2446 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2452 * helper function for btrfs_search_slot. The goal is to find a block
2453 * in cache without setting the path to blocking. If we find the block
2454 * we return zero and the path is unchanged.
2456 * If we can't find the block, we set the path blocking and do some
2457 * reada. -EAGAIN is returned and the search must be repeated.
2460 read_block_for_search(struct btrfs_trans_handle *trans,
2461 struct btrfs_root *root, struct btrfs_path *p,
2462 struct extent_buffer **eb_ret, int level, int slot,
2463 struct btrfs_key *key, u64 time_seq)
2467 struct extent_buffer *b = *eb_ret;
2468 struct extent_buffer *tmp;
2471 blocknr = btrfs_node_blockptr(b, slot);
2472 gen = btrfs_node_ptr_generation(b, slot);
2474 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2476 /* first we do an atomic uptodate check */
2477 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2482 /* the pages were up to date, but we failed
2483 * the generation number check. Do a full
2484 * read for the generation number that is correct.
2485 * We must do this without dropping locks so
2486 * we can trust our generation number
2488 btrfs_set_path_blocking(p);
2490 /* now we're allowed to do a blocking uptodate check */
2491 ret = btrfs_read_buffer(tmp, gen);
2496 free_extent_buffer(tmp);
2497 btrfs_release_path(p);
2502 * reduce lock contention at high levels
2503 * of the btree by dropping locks before
2504 * we read. Don't release the lock on the current
2505 * level because we need to walk this node to figure
2506 * out which blocks to read.
2508 btrfs_unlock_up_safe(p, level + 1);
2509 btrfs_set_path_blocking(p);
2511 free_extent_buffer(tmp);
2512 if (p->reada != READA_NONE)
2513 reada_for_search(root, p, level, slot, key->objectid);
2515 btrfs_release_path(p);
2518 tmp = read_tree_block(root, blocknr, 0);
2521 * If the read above didn't mark this buffer up to date,
2522 * it will never end up being up to date. Set ret to -EIO now
2523 * and give up so that our caller doesn't loop forever
2526 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2528 free_extent_buffer(tmp);
2536 * helper function for btrfs_search_slot. This does all of the checks
2537 * for node-level blocks and does any balancing required based on
2540 * If no extra work was required, zero is returned. If we had to
2541 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2545 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2546 struct btrfs_root *root, struct btrfs_path *p,
2547 struct extent_buffer *b, int level, int ins_len,
2548 int *write_lock_level)
2551 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2552 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2555 if (*write_lock_level < level + 1) {
2556 *write_lock_level = level + 1;
2557 btrfs_release_path(p);
2561 btrfs_set_path_blocking(p);
2562 reada_for_balance(root, p, level);
2563 sret = split_node(trans, root, p, level);
2564 btrfs_clear_path_blocking(p, NULL, 0);
2571 b = p->nodes[level];
2572 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2573 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2576 if (*write_lock_level < level + 1) {
2577 *write_lock_level = level + 1;
2578 btrfs_release_path(p);
2582 btrfs_set_path_blocking(p);
2583 reada_for_balance(root, p, level);
2584 sret = balance_level(trans, root, p, level);
2585 btrfs_clear_path_blocking(p, NULL, 0);
2591 b = p->nodes[level];
2593 btrfs_release_path(p);
2596 BUG_ON(btrfs_header_nritems(b) == 1);
2606 static void key_search_validate(struct extent_buffer *b,
2607 struct btrfs_key *key,
2610 #ifdef CONFIG_BTRFS_ASSERT
2611 struct btrfs_disk_key disk_key;
2613 btrfs_cpu_key_to_disk(&disk_key, key);
2616 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2617 offsetof(struct btrfs_leaf, items[0].key),
2620 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2621 offsetof(struct btrfs_node, ptrs[0].key),
2626 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2627 int level, int *prev_cmp, int *slot)
2629 if (*prev_cmp != 0) {
2630 *prev_cmp = bin_search(b, key, level, slot);
2634 key_search_validate(b, key, level);
2640 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2641 u64 iobjectid, u64 ioff, u8 key_type,
2642 struct btrfs_key *found_key)
2645 struct btrfs_key key;
2646 struct extent_buffer *eb;
2651 key.type = key_type;
2652 key.objectid = iobjectid;
2655 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2659 eb = path->nodes[0];
2660 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2661 ret = btrfs_next_leaf(fs_root, path);
2664 eb = path->nodes[0];
2667 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2668 if (found_key->type != key.type ||
2669 found_key->objectid != key.objectid)
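/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * btrfs_find_item() that looks up the INODE_ITEM of a given inode number.
 * The function name and the pr_debug message are hypothetical; the helper
 * itself is the one above. Guarded by #if 0 so it is never built.
 */
#if 0
static int example_find_inode_item(struct btrfs_root *fs_root, u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* 0 on an (objectid, type) match, 1 if nothing suitable was found */
	ret = btrfs_find_item(fs_root, path, ino, 0,
			      BTRFS_INODE_ITEM_KEY, &found_key);
	if (ret == 0)
		pr_debug("found key (%llu %u %llu)\n", found_key.objectid,
			 found_key.type, found_key.offset);

	btrfs_free_path(path);
	return ret;
}
#endif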
2676 * look for key in the tree. path is filled in with nodes along the way.
2677 * if key is found, we return zero and you can find the item in the leaf
2678 * level of the path (level 0)
2680 * If the key isn't found, the path points to the slot where it should
2681 * be inserted, and 1 is returned. If there are other errors during the
2682 * search a negative error number is returned.
2684 * if ins_len > 0, nodes and leaves will be split as we walk down the
2685 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2688 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2689 *root, struct btrfs_key *key, struct btrfs_path *p, int
2692 struct extent_buffer *b;
2697 int lowest_unlock = 1;
2699 /* everything at write_lock_level or lower must be write locked */
2700 int write_lock_level = 0;
2701 u8 lowest_level = 0;
2702 int min_write_lock_level;
2705 lowest_level = p->lowest_level;
2706 WARN_ON(lowest_level && ins_len > 0);
2707 WARN_ON(p->nodes[0] != NULL);
2708 BUG_ON(!cow && ins_len);
2713 /* when we are removing items, we might have to go up to level
2714 * two as we update tree pointers. Make sure we keep write locks
2715 * on those levels as well
2717 write_lock_level = 2;
2718 } else if (ins_len > 0) {
2720 * for inserting items, make sure we have a write lock on
2721 * level 1 so we can update keys
2723 write_lock_level = 1;
2727 write_lock_level = -1;
2729 if (cow && (p->keep_locks || p->lowest_level))
2730 write_lock_level = BTRFS_MAX_LEVEL;
2732 min_write_lock_level = write_lock_level;
2737 * we try very hard to do read locks on the root
2739 root_lock = BTRFS_READ_LOCK;
2741 if (p->search_commit_root) {
2743 * the commit roots are read only
2744 * so we always do read locks
2746 if (p->need_commit_sem)
2747 down_read(&root->fs_info->commit_root_sem);
2748 b = root->commit_root;
2749 extent_buffer_get(b);
2750 level = btrfs_header_level(b);
2751 if (p->need_commit_sem)
2752 up_read(&root->fs_info->commit_root_sem);
2753 if (!p->skip_locking)
2754 btrfs_tree_read_lock(b);
2756 if (p->skip_locking) {
2757 b = btrfs_root_node(root);
2758 level = btrfs_header_level(b);
2760 /* we don't know the level of the root node
2761 * until we actually have it read locked
2763 b = btrfs_read_lock_root_node(root);
2764 level = btrfs_header_level(b);
2765 if (level <= write_lock_level) {
2766 /* whoops, must trade for write lock */
2767 btrfs_tree_read_unlock(b);
2768 free_extent_buffer(b);
2769 b = btrfs_lock_root_node(root);
2770 root_lock = BTRFS_WRITE_LOCK;
2772 /* the level might have changed, check again */
2773 level = btrfs_header_level(b);
2777 p->nodes[level] = b;
2778 if (!p->skip_locking)
2779 p->locks[level] = root_lock;
2782 level = btrfs_header_level(b);
2785 * setup the path here so we can release it under lock
2786 * contention with the cow code
2790 * if we don't really need to cow this block
2791 * then we don't want to set the path blocking,
2792 * so we test it here
2794 if (!should_cow_block(trans, root, b)) {
2795 trans->dirty = true;
2800 * must have write locks on this node and the
2803 if (level > write_lock_level ||
2804 (level + 1 > write_lock_level &&
2805 level + 1 < BTRFS_MAX_LEVEL &&
2806 p->nodes[level + 1])) {
2807 write_lock_level = level + 1;
2808 btrfs_release_path(p);
2812 btrfs_set_path_blocking(p);
2813 err = btrfs_cow_block(trans, root, b,
2814 p->nodes[level + 1],
2815 p->slots[level + 1], &b);
2822 p->nodes[level] = b;
2823 btrfs_clear_path_blocking(p, NULL, 0);
2826 * we have a lock on b and as long as we aren't changing
2827 * the tree, there is no way for the items in b to change.
2828 * It is safe to drop the lock on our parent before we
2829 * go through the expensive btree search on b.
2831 * If we're inserting or deleting (ins_len != 0), then we might
2832 * be changing slot zero, which may require changing the parent.
2833 * So, we can't drop the lock until after we know which slot
2834 * we're operating on.
2836 if (!ins_len && !p->keep_locks) {
2839 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2840 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2845 ret = key_search(b, key, level, &prev_cmp, &slot);
2851 if (ret && slot > 0) {
2855 p->slots[level] = slot;
2856 err = setup_nodes_for_search(trans, root, p, b, level,
2857 ins_len, &write_lock_level);
2864 b = p->nodes[level];
2865 slot = p->slots[level];
2868 * slot 0 is special, if we change the key
2869 * we have to update the parent pointer
2870 * which means we must have a write lock
2873 if (slot == 0 && ins_len &&
2874 write_lock_level < level + 1) {
2875 write_lock_level = level + 1;
2876 btrfs_release_path(p);
2880 unlock_up(p, level, lowest_unlock,
2881 min_write_lock_level, &write_lock_level);
2883 if (level == lowest_level) {
2889 err = read_block_for_search(trans, root, p,
2890 &b, level, slot, key, 0);
2898 if (!p->skip_locking) {
2899 level = btrfs_header_level(b);
2900 if (level <= write_lock_level) {
2901 err = btrfs_try_tree_write_lock(b);
2903 btrfs_set_path_blocking(p);
2905 btrfs_clear_path_blocking(p, b,
2908 p->locks[level] = BTRFS_WRITE_LOCK;
2910 err = btrfs_tree_read_lock_atomic(b);
2912 btrfs_set_path_blocking(p);
2913 btrfs_tree_read_lock(b);
2914 btrfs_clear_path_blocking(p, b,
2917 p->locks[level] = BTRFS_READ_LOCK;
2919 p->nodes[level] = b;
2922 p->slots[level] = slot;
2924 btrfs_leaf_free_space(root, b) < ins_len) {
2925 if (write_lock_level < 1) {
2926 write_lock_level = 1;
2927 btrfs_release_path(p);
2931 btrfs_set_path_blocking(p);
2932 err = split_leaf(trans, root, key,
2933 p, ins_len, ret == 0);
2934 btrfs_clear_path_blocking(p, NULL, 0);
2942 if (!p->search_for_split)
2943 unlock_up(p, level, lowest_unlock,
2944 min_write_lock_level, &write_lock_level);
2951 * we don't really know what they plan on doing with the path
2952 * from here on, so for now just mark it as blocking
2954 if (!p->leave_spinning)
2955 btrfs_set_path_blocking(p);
2956 if (ret < 0 && !p->skip_release_on_error)
2957 btrfs_release_path(p);
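/*
 * Illustrative sketch, not part of the original file: the common read-only
 * lookup pattern built on btrfs_search_slot(). The inode item is only a
 * familiar example payload and the function name is hypothetical; error
 * handling is reduced to the minimum. Guarded by #if 0 so it is never built.
 */
#if 0
static int example_read_only_lookup(struct btrfs_root *root, u64 ino)
{
	struct btrfs_inode_item *ii;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* NULL trans and cow == 0: purely read-only, nothing is modified */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0)		/* not found, path points at the insert slot */
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_inode_item);
	pr_debug("inode %llu has nlink %u\n", ino,
		 btrfs_inode_nlink(path->nodes[0], ii));
out:
	btrfs_free_path(path);
	return ret;
}
#endif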
2962 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2963 * current state of the tree together with the operations recorded in the tree
2964 * modification log to search for the key in a previous version of this tree, as
2965 * denoted by the time_seq parameter.
2967 * Naturally, there is no support for insert, delete or cow operations.
2969 * The resulting path and return value will be set up as if we called
2970 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2972 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2973 struct btrfs_path *p, u64 time_seq)
2975 struct extent_buffer *b;
2980 int lowest_unlock = 1;
2981 u8 lowest_level = 0;
2984 lowest_level = p->lowest_level;
2985 WARN_ON(p->nodes[0] != NULL);
2987 if (p->search_commit_root) {
2989 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2993 b = get_old_root(root, time_seq);
2994 level = btrfs_header_level(b);
2995 p->locks[level] = BTRFS_READ_LOCK;
2998 level = btrfs_header_level(b);
2999 p->nodes[level] = b;
3000 btrfs_clear_path_blocking(p, NULL, 0);
3003 * we have a lock on b and as long as we aren't changing
3004 * the tree, there is no way for the items in b to change.
3005 * It is safe to drop the lock on our parent before we
3006 * go through the expensive btree search on b.
3008 btrfs_unlock_up_safe(p, level + 1);
3011 * Since we can unwind ebs we want to do a real search every
3015 ret = key_search(b, key, level, &prev_cmp, &slot);
3019 if (ret && slot > 0) {
3023 p->slots[level] = slot;
3024 unlock_up(p, level, lowest_unlock, 0, NULL);
3026 if (level == lowest_level) {
3032 err = read_block_for_search(NULL, root, p, &b, level,
3033 slot, key, time_seq);
3041 level = btrfs_header_level(b);
3042 err = btrfs_tree_read_lock_atomic(b);
3044 btrfs_set_path_blocking(p);
3045 btrfs_tree_read_lock(b);
3046 btrfs_clear_path_blocking(p, b,
3049 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3054 p->locks[level] = BTRFS_READ_LOCK;
3055 p->nodes[level] = b;
3057 p->slots[level] = slot;
3058 unlock_up(p, level, lowest_unlock, 0, NULL);
3064 if (!p->leave_spinning)
3065 btrfs_set_path_blocking(p);
3067 btrfs_release_path(p);
3073 * helper to use instead of search slot if no exact match is needed but
3074 * instead the next or previous item should be returned.
3075 * When find_higher is true, the next higher item is returned, the next lower
3077 * When return_any and find_higher are both true, and no higher item is found,
3078 * return the next lower instead.
3079 * When return_any is true and find_higher is false, and no lower item is found,
3080 * return the next higher instead.
3081 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3084 int btrfs_search_slot_for_read(struct btrfs_root *root,
3085 struct btrfs_key *key, struct btrfs_path *p,
3086 int find_higher, int return_any)
3089 struct extent_buffer *leaf;
3092 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3096 * a return value of 1 means the path is at the position where the
3097 * item should be inserted. Normally this is the next bigger item,
3098 * but in case the previous item is the last in a leaf, path points
3099 * to the first free slot in the previous leaf, i.e. at an invalid
3105 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3106 ret = btrfs_next_leaf(root, p);
3112 * no higher item found, return the next
3117 btrfs_release_path(p);
3121 if (p->slots[0] == 0) {
3122 ret = btrfs_prev_leaf(root, p);
3127 if (p->slots[0] == btrfs_header_nritems(leaf))
3134 * no lower item found, return the next
3139 btrfs_release_path(p);
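/*
 * Illustrative sketch, not part of the original file: using
 * btrfs_search_slot_for_read() to land on the first item at or after a key,
 * falling back to the closest lower one when nothing higher exists. All
 * names except the helpers above are hypothetical; never built (#if 0).
 */
#if 0
static int example_find_nearest(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found;
	int ret;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* find_higher = 1, return_any = 1: only fails if the tree is empty */
	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
	if (ret == 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
		pr_debug("nearest key (%llu %u %llu)\n", found.objectid,
			 found.type, found.offset);
	}

	btrfs_free_path(path);
	return ret;
}
#endif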
3149 * adjust the pointers going up the tree, starting at level
3150 * making sure the right key of each node points to 'key'.
3151 * This is used after shifting pointers to the left, so it stops
3152 * fixing up pointers when a given leaf/node is not in slot 0 of the
3156 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3157 struct btrfs_path *path,
3158 struct btrfs_disk_key *key, int level)
3161 struct extent_buffer *t;
3163 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3164 int tslot = path->slots[i];
3165 if (!path->nodes[i])
3168 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3169 btrfs_set_node_key(t, key, tslot);
3170 btrfs_mark_buffer_dirty(path->nodes[i]);
3179 * This function isn't completely safe. It's the caller's responsibility
3180 * that the new key won't break the order
3182 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3183 struct btrfs_path *path,
3184 struct btrfs_key *new_key)
3186 struct btrfs_disk_key disk_key;
3187 struct extent_buffer *eb;
3190 eb = path->nodes[0];
3191 slot = path->slots[0];
3193 btrfs_item_key(eb, &disk_key, slot - 1);
3194 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3196 if (slot < btrfs_header_nritems(eb) - 1) {
3197 btrfs_item_key(eb, &disk_key, slot + 1);
3198 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3201 btrfs_cpu_key_to_disk(&disk_key, new_key);
3202 btrfs_set_item_key(eb, &disk_key, slot);
3203 btrfs_mark_buffer_dirty(eb);
3205 fixup_low_keys(fs_info, path, &disk_key, 1);
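/*
 * Illustrative sketch, not part of the original file: the typical way a
 * caller uses btrfs_set_item_key_safe() after trimming the front of an
 * item. It assumes the path already points at a write-locked leaf slot and
 * that new_offset keeps the key ordered; the names are hypothetical.
 */
#if 0
static void example_bump_key_offset(struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path, u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	/* BUG()s if the new key does not stay strictly between its neighbors */
	btrfs_set_item_key_safe(fs_info, path, &new_key);
}
#endif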
3209 * try to push data from one node into the next node left in the
3212 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3213 * error, and > 0 if there was no room in the left hand block.
3215 static int push_node_left(struct btrfs_trans_handle *trans,
3216 struct btrfs_root *root, struct extent_buffer *dst,
3217 struct extent_buffer *src, int empty)
3224 src_nritems = btrfs_header_nritems(src);
3225 dst_nritems = btrfs_header_nritems(dst);
3226 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3227 WARN_ON(btrfs_header_generation(src) != trans->transid);
3228 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3230 if (!empty && src_nritems <= 8)
3233 if (push_items <= 0)
3237 push_items = min(src_nritems, push_items);
3238 if (push_items < src_nritems) {
3239 /* leave at least 8 pointers in the node if
3240 * we aren't going to empty it
3242 if (src_nritems - push_items < 8) {
3243 if (push_items <= 8)
3249 push_items = min(src_nritems - 8, push_items);
3251 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3254 btrfs_abort_transaction(trans, ret);
3257 copy_extent_buffer(dst, src,
3258 btrfs_node_key_ptr_offset(dst_nritems),
3259 btrfs_node_key_ptr_offset(0),
3260 push_items * sizeof(struct btrfs_key_ptr));
3262 if (push_items < src_nritems) {
3264 * don't call tree_mod_log_eb_move here, key removal was already
3265 * fully logged by tree_mod_log_eb_copy above.
3267 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3268 btrfs_node_key_ptr_offset(push_items),
3269 (src_nritems - push_items) *
3270 sizeof(struct btrfs_key_ptr));
3272 btrfs_set_header_nritems(src, src_nritems - push_items);
3273 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3274 btrfs_mark_buffer_dirty(src);
3275 btrfs_mark_buffer_dirty(dst);
3281 * try to push data from one node into the next node right in the
3284 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3285 * error, and > 0 if there was no room in the right hand block.
3287 * this will only push up to 1/2 the contents of the left node over
3289 static int balance_node_right(struct btrfs_trans_handle *trans,
3290 struct btrfs_root *root,
3291 struct extent_buffer *dst,
3292 struct extent_buffer *src)
3300 WARN_ON(btrfs_header_generation(src) != trans->transid);
3301 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3303 src_nritems = btrfs_header_nritems(src);
3304 dst_nritems = btrfs_header_nritems(dst);
3305 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3306 if (push_items <= 0)
3309 if (src_nritems < 4)
3312 max_push = src_nritems / 2 + 1;
3313 /* don't try to empty the node */
3314 if (max_push >= src_nritems)
3317 if (max_push < push_items)
3318 push_items = max_push;
3320 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3321 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3322 btrfs_node_key_ptr_offset(0),
3324 sizeof(struct btrfs_key_ptr));
3326 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3327 src_nritems - push_items, push_items);
3329 btrfs_abort_transaction(trans, ret);
3332 copy_extent_buffer(dst, src,
3333 btrfs_node_key_ptr_offset(0),
3334 btrfs_node_key_ptr_offset(src_nritems - push_items),
3335 push_items * sizeof(struct btrfs_key_ptr));
3337 btrfs_set_header_nritems(src, src_nritems - push_items);
3338 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3340 btrfs_mark_buffer_dirty(src);
3341 btrfs_mark_buffer_dirty(dst);
3347 * helper function to insert a new root level in the tree.
3348 * A new node is allocated, and a single item is inserted to
3349 * point to the existing root
3351 * returns zero on success or < 0 on failure.
3353 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3354 struct btrfs_root *root,
3355 struct btrfs_path *path, int level)
3358 struct extent_buffer *lower;
3359 struct extent_buffer *c;
3360 struct extent_buffer *old;
3361 struct btrfs_disk_key lower_key;
3363 BUG_ON(path->nodes[level]);
3364 BUG_ON(path->nodes[level-1] != root->node);
3366 lower = path->nodes[level-1];
3368 btrfs_item_key(lower, &lower_key, 0);
3370 btrfs_node_key(lower, &lower_key, 0);
3372 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3373 &lower_key, level, root->node->start, 0);
3377 root_add_used(root, root->nodesize);
3379 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3380 btrfs_set_header_nritems(c, 1);
3381 btrfs_set_header_level(c, level);
3382 btrfs_set_header_bytenr(c, c->start);
3383 btrfs_set_header_generation(c, trans->transid);
3384 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3385 btrfs_set_header_owner(c, root->root_key.objectid);
3387 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3390 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3391 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3393 btrfs_set_node_key(c, &lower_key, 0);
3394 btrfs_set_node_blockptr(c, 0, lower->start);
3395 lower_gen = btrfs_header_generation(lower);
3396 WARN_ON(lower_gen != trans->transid);
3398 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3400 btrfs_mark_buffer_dirty(c);
3403 tree_mod_log_set_root_pointer(root, c, 0);
3404 rcu_assign_pointer(root->node, c);
3406 /* the super has an extra ref to root->node */
3407 free_extent_buffer(old);
3409 add_root_to_dirty_list(root);
3410 extent_buffer_get(c);
3411 path->nodes[level] = c;
3412 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3413 path->slots[level] = 0;
3418 * worker function to insert a single pointer in a node.
3419 * the node should have enough room for the pointer already
3421 * slot and level indicate where you want the key to go, and
3422 * blocknr is the block the key points to.
3424 static void insert_ptr(struct btrfs_trans_handle *trans,
3425 struct btrfs_root *root, struct btrfs_path *path,
3426 struct btrfs_disk_key *key, u64 bytenr,
3427 int slot, int level)
3429 struct extent_buffer *lower;
3433 BUG_ON(!path->nodes[level]);
3434 btrfs_assert_tree_locked(path->nodes[level]);
3435 lower = path->nodes[level];
3436 nritems = btrfs_header_nritems(lower);
3437 BUG_ON(slot > nritems);
3438 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3439 if (slot != nritems) {
3441 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3442 slot, nritems - slot);
3443 memmove_extent_buffer(lower,
3444 btrfs_node_key_ptr_offset(slot + 1),
3445 btrfs_node_key_ptr_offset(slot),
3446 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3449 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3450 MOD_LOG_KEY_ADD, GFP_NOFS);
3453 btrfs_set_node_key(lower, key, slot);
3454 btrfs_set_node_blockptr(lower, slot, bytenr);
3455 WARN_ON(trans->transid == 0);
3456 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3457 btrfs_set_header_nritems(lower, nritems + 1);
3458 btrfs_mark_buffer_dirty(lower);
3462 * split the node at the specified level in path in two.
3463 * The path is corrected to point to the appropriate node after the split
3465 * Before splitting this tries to make some room in the node by pushing
3466 * left and right, if either one works, it returns right away.
3468 * returns 0 on success and < 0 on failure
3470 static noinline int split_node(struct btrfs_trans_handle *trans,
3471 struct btrfs_root *root,
3472 struct btrfs_path *path, int level)
3474 struct extent_buffer *c;
3475 struct extent_buffer *split;
3476 struct btrfs_disk_key disk_key;
3481 c = path->nodes[level];
3482 WARN_ON(btrfs_header_generation(c) != trans->transid);
3483 if (c == root->node) {
3485 * trying to split the root, let's make a new one
3487 * tree mod log: We don't log removal of the old root in
3488 * insert_new_root, because that root buffer will be kept as a
3489 * normal node. We are going to log removal of half of the
3490 * elements below with tree_mod_log_eb_copy. We're holding a
3491 * tree lock on the buffer, which is why we cannot race with
3492 * other tree_mod_log users.
3494 ret = insert_new_root(trans, root, path, level + 1);
3498 ret = push_nodes_for_insert(trans, root, path, level);
3499 c = path->nodes[level];
3500 if (!ret && btrfs_header_nritems(c) <
3501 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3507 c_nritems = btrfs_header_nritems(c);
3508 mid = (c_nritems + 1) / 2;
3509 btrfs_node_key(c, &disk_key, mid);
3511 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3512 &disk_key, level, c->start, 0);
3514 return PTR_ERR(split);
3516 root_add_used(root, root->nodesize);
3518 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3519 btrfs_set_header_level(split, btrfs_header_level(c));
3520 btrfs_set_header_bytenr(split, split->start);
3521 btrfs_set_header_generation(split, trans->transid);
3522 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3523 btrfs_set_header_owner(split, root->root_key.objectid);
3524 write_extent_buffer(split, root->fs_info->fsid,
3525 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3526 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3527 btrfs_header_chunk_tree_uuid(split),
3530 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3531 mid, c_nritems - mid);
3533 btrfs_abort_transaction(trans, ret);
3536 copy_extent_buffer(split, c,
3537 btrfs_node_key_ptr_offset(0),
3538 btrfs_node_key_ptr_offset(mid),
3539 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3540 btrfs_set_header_nritems(split, c_nritems - mid);
3541 btrfs_set_header_nritems(c, mid);
3544 btrfs_mark_buffer_dirty(c);
3545 btrfs_mark_buffer_dirty(split);
3547 insert_ptr(trans, root, path, &disk_key, split->start,
3548 path->slots[level + 1] + 1, level + 1);
3550 if (path->slots[level] >= mid) {
3551 path->slots[level] -= mid;
3552 btrfs_tree_unlock(c);
3553 free_extent_buffer(c);
3554 path->nodes[level] = split;
3555 path->slots[level + 1] += 1;
3557 btrfs_tree_unlock(split);
3558 free_extent_buffer(split);
3564 * how many bytes are required to store the items in a leaf. start
3565 * and nr indicate which items in the leaf to check. This totals up the
3566 * space used both by the item structs and the item data
3568 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3570 struct btrfs_item *start_item;
3571 struct btrfs_item *end_item;
3572 struct btrfs_map_token token;
3574 int nritems = btrfs_header_nritems(l);
3575 int end = min(nritems, start + nr) - 1;
3579 btrfs_init_map_token(&token);
3580 start_item = btrfs_item_nr(start);
3581 end_item = btrfs_item_nr(end);
3582 data_len = btrfs_token_item_offset(l, start_item, &token) +
3583 btrfs_token_item_size(l, start_item, &token);
3584 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3585 data_len += sizeof(struct btrfs_item) * nr;
3586 WARN_ON(data_len < 0);
3591 * The space between the end of the leaf items and
3592 * the start of the leaf data. IOW, how much room
3593 * the leaf has left for both items and data
3595 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3596 struct extent_buffer *leaf)
3598 int nritems = btrfs_header_nritems(leaf);
3600 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3602 btrfs_crit(root->fs_info,
3603 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3604 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3605 leaf_space_used(leaf, 0, nritems), nritems);
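/*
 * Illustrative sketch, not part of the original file: the room a new item
 * needs is its data size plus one struct btrfs_item header, which is what
 * callers compare against btrfs_leaf_free_space() (and what they pass as
 * ins_len to btrfs_search_slot()). Hypothetical helper, never built.
 */
#if 0
static bool example_item_fits(struct btrfs_root *root,
			      struct extent_buffer *leaf, u32 data_size)
{
	u32 needed = data_size + sizeof(struct btrfs_item);

	return btrfs_leaf_free_space(root, leaf) >= (int)needed;
}
#endif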
3611 * min slot controls the lowest index we're willing to push to the
3612 * right. We'll push up to and including min_slot, but no lower
3614 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3615 struct btrfs_root *root,
3616 struct btrfs_path *path,
3617 int data_size, int empty,
3618 struct extent_buffer *right,
3619 int free_space, u32 left_nritems,
3622 struct extent_buffer *left = path->nodes[0];
3623 struct extent_buffer *upper = path->nodes[1];
3624 struct btrfs_map_token token;
3625 struct btrfs_disk_key disk_key;
3630 struct btrfs_item *item;
3636 btrfs_init_map_token(&token);
3641 nr = max_t(u32, 1, min_slot);
3643 if (path->slots[0] >= left_nritems)
3644 push_space += data_size;
3646 slot = path->slots[1];
3647 i = left_nritems - 1;
3649 item = btrfs_item_nr(i);
3651 if (!empty && push_items > 0) {
3652 if (path->slots[0] > i)
3654 if (path->slots[0] == i) {
3655 int space = btrfs_leaf_free_space(root, left);
3656 if (space + push_space * 2 > free_space)
3661 if (path->slots[0] == i)
3662 push_space += data_size;
3664 this_item_size = btrfs_item_size(left, item);
3665 if (this_item_size + sizeof(*item) + push_space > free_space)
3669 push_space += this_item_size + sizeof(*item);
3675 if (push_items == 0)
3678 WARN_ON(!empty && push_items == left_nritems);
3680 /* push left to right */
3681 right_nritems = btrfs_header_nritems(right);
3683 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3684 push_space -= leaf_data_end(root, left);
3686 /* make room in the right data area */
3687 data_end = leaf_data_end(root, right);
3688 memmove_extent_buffer(right,
3689 btrfs_leaf_data(right) + data_end - push_space,
3690 btrfs_leaf_data(right) + data_end,
3691 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3693 /* copy from the left data area */
3694 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3695 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3696 btrfs_leaf_data(left) + leaf_data_end(root, left),
3699 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3700 btrfs_item_nr_offset(0),
3701 right_nritems * sizeof(struct btrfs_item));
3703 /* copy the items from left to right */
3704 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3705 btrfs_item_nr_offset(left_nritems - push_items),
3706 push_items * sizeof(struct btrfs_item));
3708 /* update the item pointers */
3709 right_nritems += push_items;
3710 btrfs_set_header_nritems(right, right_nritems);
3711 push_space = BTRFS_LEAF_DATA_SIZE(root);
3712 for (i = 0; i < right_nritems; i++) {
3713 item = btrfs_item_nr(i);
3714 push_space -= btrfs_token_item_size(right, item, &token);
3715 btrfs_set_token_item_offset(right, item, push_space, &token);
3718 left_nritems -= push_items;
3719 btrfs_set_header_nritems(left, left_nritems);
3722 btrfs_mark_buffer_dirty(left);
3724 clean_tree_block(trans, root->fs_info, left);
3726 btrfs_mark_buffer_dirty(right);
3728 btrfs_item_key(right, &disk_key, 0);
3729 btrfs_set_node_key(upper, &disk_key, slot + 1);
3730 btrfs_mark_buffer_dirty(upper);
3732 /* then fixup the leaf pointer in the path */
3733 if (path->slots[0] >= left_nritems) {
3734 path->slots[0] -= left_nritems;
3735 if (btrfs_header_nritems(path->nodes[0]) == 0)
3736 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3737 btrfs_tree_unlock(path->nodes[0]);
3738 free_extent_buffer(path->nodes[0]);
3739 path->nodes[0] = right;
3740 path->slots[1] += 1;
3742 btrfs_tree_unlock(right);
3743 free_extent_buffer(right);
3748 btrfs_tree_unlock(right);
3749 free_extent_buffer(right);
3754 * push some data in the path leaf to the right, trying to free up at
3755 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3757 * returns 1 if the push failed because the other node didn't have enough
3758 * room, 0 if everything worked out and < 0 if there were major errors.
3760 * this will push starting from min_slot to the end of the leaf. It won't
3761 * push any slot lower than min_slot
3763 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3764 *root, struct btrfs_path *path,
3765 int min_data_size, int data_size,
3766 int empty, u32 min_slot)
3768 struct extent_buffer *left = path->nodes[0];
3769 struct extent_buffer *right;
3770 struct extent_buffer *upper;
3776 if (!path->nodes[1])
3779 slot = path->slots[1];
3780 upper = path->nodes[1];
3781 if (slot >= btrfs_header_nritems(upper) - 1)
3784 btrfs_assert_tree_locked(path->nodes[1]);
3786 right = read_node_slot(root, upper, slot + 1);
3788 * slot + 1 is not valid or we fail to read the right node,
3789 * no big deal, just return.
3794 btrfs_tree_lock(right);
3795 btrfs_set_lock_blocking(right);
3797 free_space = btrfs_leaf_free_space(root, right);
3798 if (free_space < data_size)
3801 /* cow and double check */
3802 ret = btrfs_cow_block(trans, root, right, upper,
3807 free_space = btrfs_leaf_free_space(root, right);
3808 if (free_space < data_size)
3811 left_nritems = btrfs_header_nritems(left);
3812 if (left_nritems == 0)
3815 if (path->slots[0] == left_nritems && !empty) {
3816 /* Key greater than all keys in the leaf, right neighbor has
3817 * enough room for it and we're not emptying our leaf to delete
3818 * it, therefore use right neighbor to insert the new item and
3819 * no need to touch/dirty our left leaf. */
3820 btrfs_tree_unlock(left);
3821 free_extent_buffer(left);
3822 path->nodes[0] = right;
3828 return __push_leaf_right(trans, root, path, min_data_size, empty,
3829 right, free_space, left_nritems, min_slot);
3831 btrfs_tree_unlock(right);
3832 free_extent_buffer(right);
3837 * push some data in the path leaf to the left, trying to free up at
3838 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3840 * max_slot can put a limit on how far into the leaf we'll push items. The
3841 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3844 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3845 struct btrfs_root *root,
3846 struct btrfs_path *path, int data_size,
3847 int empty, struct extent_buffer *left,
3848 int free_space, u32 right_nritems,
3851 struct btrfs_disk_key disk_key;
3852 struct extent_buffer *right = path->nodes[0];
3856 struct btrfs_item *item;
3857 u32 old_left_nritems;
3861 u32 old_left_item_size;
3862 struct btrfs_map_token token;
3864 btrfs_init_map_token(&token);
3867 nr = min(right_nritems, max_slot);
3869 nr = min(right_nritems - 1, max_slot);
3871 for (i = 0; i < nr; i++) {
3872 item = btrfs_item_nr(i);
3874 if (!empty && push_items > 0) {
3875 if (path->slots[0] < i)
3877 if (path->slots[0] == i) {
3878 int space = btrfs_leaf_free_space(root, right);
3879 if (space + push_space * 2 > free_space)
3884 if (path->slots[0] == i)
3885 push_space += data_size;
3887 this_item_size = btrfs_item_size(right, item);
3888 if (this_item_size + sizeof(*item) + push_space > free_space)
3892 push_space += this_item_size + sizeof(*item);
3895 if (push_items == 0) {
3899 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3901 /* push data from right to left */
3902 copy_extent_buffer(left, right,
3903 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3904 btrfs_item_nr_offset(0),
3905 push_items * sizeof(struct btrfs_item));
3907 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3908 btrfs_item_offset_nr(right, push_items - 1);
3910 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3911 leaf_data_end(root, left) - push_space,
3912 btrfs_leaf_data(right) +
3913 btrfs_item_offset_nr(right, push_items - 1),
3915 old_left_nritems = btrfs_header_nritems(left);
3916 BUG_ON(old_left_nritems <= 0);
3918 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3919 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3922 item = btrfs_item_nr(i);
3924 ioff = btrfs_token_item_offset(left, item, &token);
3925 btrfs_set_token_item_offset(left, item,
3926 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3929 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3931 /* fixup right node */
3932 if (push_items > right_nritems)
3933 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3936 if (push_items < right_nritems) {
3937 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3938 leaf_data_end(root, right);
3939 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3940 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3941 btrfs_leaf_data(right) +
3942 leaf_data_end(root, right), push_space);
3944 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3945 btrfs_item_nr_offset(push_items),
3946 (btrfs_header_nritems(right) - push_items) *
3947 sizeof(struct btrfs_item));
3949 right_nritems -= push_items;
3950 btrfs_set_header_nritems(right, right_nritems);
3951 push_space = BTRFS_LEAF_DATA_SIZE(root);
3952 for (i = 0; i < right_nritems; i++) {
3953 item = btrfs_item_nr(i);
3955 push_space = push_space - btrfs_token_item_size(right,
3957 btrfs_set_token_item_offset(right, item, push_space, &token);
3960 btrfs_mark_buffer_dirty(left);
3962 btrfs_mark_buffer_dirty(right);
3964 clean_tree_block(trans, root->fs_info, right);
3966 btrfs_item_key(right, &disk_key, 0);
3967 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3969 /* then fixup the leaf pointer in the path */
3970 if (path->slots[0] < push_items) {
3971 path->slots[0] += old_left_nritems;
3972 btrfs_tree_unlock(path->nodes[0]);
3973 free_extent_buffer(path->nodes[0]);
3974 path->nodes[0] = left;
3975 path->slots[1] -= 1;
3977 btrfs_tree_unlock(left);
3978 free_extent_buffer(left);
3979 path->slots[0] -= push_items;
3981 BUG_ON(path->slots[0] < 0);
3984 btrfs_tree_unlock(left);
3985 free_extent_buffer(left);
3990 * push some data in the path leaf to the left, trying to free up at
3991 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3993 * max_slot can put a limit on how far into the leaf we'll push items. The
3994 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3997 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3998 *root, struct btrfs_path *path, int min_data_size,
3999 int data_size, int empty, u32 max_slot)
4001 struct extent_buffer *right = path->nodes[0];
4002 struct extent_buffer *left;
4008 slot = path->slots[1];
4011 if (!path->nodes[1])
4014 right_nritems = btrfs_header_nritems(right);
4015 if (right_nritems == 0)
4018 btrfs_assert_tree_locked(path->nodes[1]);
4020 left = read_node_slot(root, path->nodes[1], slot - 1);
4022 * slot - 1 is not valid or we fail to read the left node,
4023 * no big deal, just return.
4028 btrfs_tree_lock(left);
4029 btrfs_set_lock_blocking(left);
4031 free_space = btrfs_leaf_free_space(root, left);
4032 if (free_space < data_size) {
4037 /* cow and double check */
4038 ret = btrfs_cow_block(trans, root, left,
4039 path->nodes[1], slot - 1, &left);
4041 /* we hit -ENOSPC, but it isn't fatal here */
4047 free_space = btrfs_leaf_free_space(root, left);
4048 if (free_space < data_size) {
4053 return __push_leaf_left(trans, root, path, min_data_size,
4054 empty, left, free_space, right_nritems,
4057 btrfs_tree_unlock(left);
4058 free_extent_buffer(left);
4063 * split the path's leaf in two, making sure there is at least data_size
4064 * available for the resulting leaf level of the path.
4066 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4067 struct btrfs_root *root,
4068 struct btrfs_path *path,
4069 struct extent_buffer *l,
4070 struct extent_buffer *right,
4071 int slot, int mid, int nritems)
4076 struct btrfs_disk_key disk_key;
4077 struct btrfs_map_token token;
4079 btrfs_init_map_token(&token);
4081 nritems = nritems - mid;
4082 btrfs_set_header_nritems(right, nritems);
4083 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4085 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4086 btrfs_item_nr_offset(mid),
4087 nritems * sizeof(struct btrfs_item));
4089 copy_extent_buffer(right, l,
4090 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4091 data_copy_size, btrfs_leaf_data(l) +
4092 leaf_data_end(root, l), data_copy_size);
4094 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4095 btrfs_item_end_nr(l, mid);
4097 for (i = 0; i < nritems; i++) {
4098 struct btrfs_item *item = btrfs_item_nr(i);
4101 ioff = btrfs_token_item_offset(right, item, &token);
4102 btrfs_set_token_item_offset(right, item,
4103 ioff + rt_data_off, &token);
4106 btrfs_set_header_nritems(l, mid);
4107 btrfs_item_key(right, &disk_key, 0);
4108 insert_ptr(trans, root, path, &disk_key, right->start,
4109 path->slots[1] + 1, 1);
4111 btrfs_mark_buffer_dirty(right);
4112 btrfs_mark_buffer_dirty(l);
4113 BUG_ON(path->slots[0] != slot);
4116 btrfs_tree_unlock(path->nodes[0]);
4117 free_extent_buffer(path->nodes[0]);
4118 path->nodes[0] = right;
4119 path->slots[0] -= mid;
4120 path->slots[1] += 1;
4122 btrfs_tree_unlock(right);
4123 free_extent_buffer(right);
4126 BUG_ON(path->slots[0] < 0);
4130 * double splits happen when we need to insert a big item in the middle
4131 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4132 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4135 * We avoid this by trying to push the items on either side of our target
4136 * into the adjacent leaves. If all goes well we can avoid the double split
4139 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4140 struct btrfs_root *root,
4141 struct btrfs_path *path,
4148 int space_needed = data_size;
4150 slot = path->slots[0];
4151 if (slot < btrfs_header_nritems(path->nodes[0]))
4152 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4155 * try to push all the items after our slot into the
4158 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4165 nritems = btrfs_header_nritems(path->nodes[0]);
4167 * our goal is to get our slot at the start or end of a leaf. If
4168 * we've done so we're done
4170 if (path->slots[0] == 0 || path->slots[0] == nritems)
4173 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4176 /* try to push all the items before our slot into the previous leaf */
4177 slot = path->slots[0];
4178 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4191 * split the path's leaf in two, making sure there is at least data_size
4192 * available for the resulting leaf level of the path.
4194 * returns 0 if all went well and < 0 on failure.
4196 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4197 struct btrfs_root *root,
4198 struct btrfs_key *ins_key,
4199 struct btrfs_path *path, int data_size,
4202 struct btrfs_disk_key disk_key;
4203 struct extent_buffer *l;
4207 struct extent_buffer *right;
4208 struct btrfs_fs_info *fs_info = root->fs_info;
4212 int num_doubles = 0;
4213 int tried_avoid_double = 0;
4216 slot = path->slots[0];
4217 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4218 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4221 /* first try to make some room by pushing left and right */
4222 if (data_size && path->nodes[1]) {
4223 int space_needed = data_size;
4225 if (slot < btrfs_header_nritems(l))
4226 space_needed -= btrfs_leaf_free_space(root, l);
4228 wret = push_leaf_right(trans, root, path, space_needed,
4229 space_needed, 0, 0);
4233 wret = push_leaf_left(trans, root, path, space_needed,
4234 space_needed, 0, (u32)-1);
4240 /* did the pushes work? */
4241 if (btrfs_leaf_free_space(root, l) >= data_size)
4245 if (!path->nodes[1]) {
4246 ret = insert_new_root(trans, root, path, 1);
4253 slot = path->slots[0];
4254 nritems = btrfs_header_nritems(l);
4255 mid = (nritems + 1) / 2;
4259 leaf_space_used(l, mid, nritems - mid) + data_size >
4260 BTRFS_LEAF_DATA_SIZE(root)) {
4261 if (slot >= nritems) {
4265 if (mid != nritems &&
4266 leaf_space_used(l, mid, nritems - mid) +
4267 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4268 if (data_size && !tried_avoid_double)
4269 goto push_for_double;
4275 if (leaf_space_used(l, 0, mid) + data_size >
4276 BTRFS_LEAF_DATA_SIZE(root)) {
4277 if (!extend && data_size && slot == 0) {
4279 } else if ((extend || !data_size) && slot == 0) {
4283 if (mid != nritems &&
4284 leaf_space_used(l, mid, nritems - mid) +
4285 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4286 if (data_size && !tried_avoid_double)
4287 goto push_for_double;
4295 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4297 btrfs_item_key(l, &disk_key, mid);
4299 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4300 &disk_key, 0, l->start, 0);
4302 return PTR_ERR(right);
4304 root_add_used(root, root->nodesize);
4306 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4307 btrfs_set_header_bytenr(right, right->start);
4308 btrfs_set_header_generation(right, trans->transid);
4309 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4310 btrfs_set_header_owner(right, root->root_key.objectid);
4311 btrfs_set_header_level(right, 0);
4312 write_extent_buffer(right, fs_info->fsid,
4313 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4315 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4316 btrfs_header_chunk_tree_uuid(right),
4321 btrfs_set_header_nritems(right, 0);
4322 insert_ptr(trans, root, path, &disk_key, right->start,
4323 path->slots[1] + 1, 1);
4324 btrfs_tree_unlock(path->nodes[0]);
4325 free_extent_buffer(path->nodes[0]);
4326 path->nodes[0] = right;
4328 path->slots[1] += 1;
4330 btrfs_set_header_nritems(right, 0);
4331 insert_ptr(trans, root, path, &disk_key, right->start,
4333 btrfs_tree_unlock(path->nodes[0]);
4334 free_extent_buffer(path->nodes[0]);
4335 path->nodes[0] = right;
4337 if (path->slots[1] == 0)
4338 fixup_low_keys(fs_info, path, &disk_key, 1);
4340 btrfs_mark_buffer_dirty(right);
4344 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4347 BUG_ON(num_doubles != 0);
4355 push_for_double_split(trans, root, path, data_size);
4356 tried_avoid_double = 1;
4357 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4362 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4363 struct btrfs_root *root,
4364 struct btrfs_path *path, int ins_len)
4366 struct btrfs_key key;
4367 struct extent_buffer *leaf;
4368 struct btrfs_file_extent_item *fi;
4373 leaf = path->nodes[0];
4374 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4376 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4377 key.type != BTRFS_EXTENT_CSUM_KEY);
4379 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4382 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4383 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4384 fi = btrfs_item_ptr(leaf, path->slots[0],
4385 struct btrfs_file_extent_item);
4386 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4388 btrfs_release_path(path);
4390 path->keep_locks = 1;
4391 path->search_for_split = 1;
4392 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4393 path->search_for_split = 0;
4400 leaf = path->nodes[0];
4401 /* if our item isn't there, return now */
4402 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4405 /* the leaf has changed, it now has room. return now */
4406 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4409 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4410 fi = btrfs_item_ptr(leaf, path->slots[0],
4411 struct btrfs_file_extent_item);
4412 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4416 btrfs_set_path_blocking(path);
4417 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4421 path->keep_locks = 0;
4422 btrfs_unlock_up_safe(path, 1);
4425 path->keep_locks = 0;
4429 static noinline int split_item(struct btrfs_trans_handle *trans,
4430 struct btrfs_root *root,
4431 struct btrfs_path *path,
4432 struct btrfs_key *new_key,
4433 unsigned long split_offset)
4435 struct extent_buffer *leaf;
4436 struct btrfs_item *item;
4437 struct btrfs_item *new_item;
4443 struct btrfs_disk_key disk_key;
4445 leaf = path->nodes[0];
4446 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4448 btrfs_set_path_blocking(path);
4450 item = btrfs_item_nr(path->slots[0]);
4451 orig_offset = btrfs_item_offset(leaf, item);
4452 item_size = btrfs_item_size(leaf, item);
4454 buf = kmalloc(item_size, GFP_NOFS);
4458 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4459 path->slots[0]), item_size);
4461 slot = path->slots[0] + 1;
4462 nritems = btrfs_header_nritems(leaf);
4463 if (slot != nritems) {
4464 /* shift the items */
4465 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4466 btrfs_item_nr_offset(slot),
4467 (nritems - slot) * sizeof(struct btrfs_item));
4470 btrfs_cpu_key_to_disk(&disk_key, new_key);
4471 btrfs_set_item_key(leaf, &disk_key, slot);
4473 new_item = btrfs_item_nr(slot);
4475 btrfs_set_item_offset(leaf, new_item, orig_offset);
4476 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4478 btrfs_set_item_offset(leaf, item,
4479 orig_offset + item_size - split_offset);
4480 btrfs_set_item_size(leaf, item, split_offset);
4482 btrfs_set_header_nritems(leaf, nritems + 1);
4484 /* write the data for the start of the original item */
4485 write_extent_buffer(leaf, buf,
4486 btrfs_item_ptr_offset(leaf, path->slots[0]),
4489 /* write the data for the new item */
4490 write_extent_buffer(leaf, buf + split_offset,
4491 btrfs_item_ptr_offset(leaf, slot),
4492 item_size - split_offset);
4493 btrfs_mark_buffer_dirty(leaf);
4495 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4501 * This function splits a single item into two items,
4502 * giving 'new_key' to the new item and splitting the
4503 * old one at split_offset (from the start of the item).
4505 * The path may be released by this operation. After
4506 * the split, the path is pointing to the old item. The
4507 * new item is going to be in the same node as the old one.
4509 * Note, the item being split must be small enough to live alone on
4510 * a tree block with room for one extra struct btrfs_item
4512 * This allows us to split the item in place, keeping a lock on the
4513 * leaf the entire time.
4515 int btrfs_split_item(struct btrfs_trans_handle *trans,
4516 struct btrfs_root *root,
4517 struct btrfs_path *path,
4518 struct btrfs_key *new_key,
4519 unsigned long split_offset)
4522 ret = setup_leaf_for_split(trans, root, path,
4523 sizeof(struct btrfs_item));
4527 ret = split_item(trans, root, path, new_key, split_offset);
4532 * This function duplicates an item, giving 'new_key' to the new item.
4533 * It guarantees both items live in the same tree leaf and the new item
4534 * is contiguous with the original item.
4536 * This allows us to split a file extent in place, keeping a lock on the
4537 * leaf the entire time.
4539 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4540 struct btrfs_root *root,
4541 struct btrfs_path *path,
4542 struct btrfs_key *new_key)
4544 struct extent_buffer *leaf;
4548 leaf = path->nodes[0];
4549 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4550 ret = setup_leaf_for_split(trans, root, path,
4551 item_size + sizeof(struct btrfs_item));
4556 setup_items_for_insert(root, path, new_key, &item_size,
4557 item_size, item_size +
4558 sizeof(struct btrfs_item), 1);
4559 leaf = path->nodes[0];
4560 memcpy_extent_buffer(leaf,
4561 btrfs_item_ptr_offset(leaf, path->slots[0]),
4562 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4568 * make the item pointed to by the path smaller. new_size indicates
4569 * how small to make it, and from_end tells us if we just chop bytes
4570 * off the end of the item or if we shift the item to chop bytes off
4573 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4574 u32 new_size, int from_end)
4577 struct extent_buffer *leaf;
4578 struct btrfs_item *item;
4580 unsigned int data_end;
4581 unsigned int old_data_start;
4582 unsigned int old_size;
4583 unsigned int size_diff;
4585 struct btrfs_map_token token;
4587 btrfs_init_map_token(&token);
4589 leaf = path->nodes[0];
4590 slot = path->slots[0];
4592 old_size = btrfs_item_size_nr(leaf, slot);
4593 if (old_size == new_size)
4596 nritems = btrfs_header_nritems(leaf);
4597 data_end = leaf_data_end(root, leaf);
4599 old_data_start = btrfs_item_offset_nr(leaf, slot);
4601 size_diff = old_size - new_size;
4604 BUG_ON(slot >= nritems);
4607 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4609 /* first correct the data pointers */
4610 for (i = slot; i < nritems; i++) {
4612 item = btrfs_item_nr(i);
4614 ioff = btrfs_token_item_offset(leaf, item, &token);
4615 btrfs_set_token_item_offset(leaf, item,
4616 ioff + size_diff, &token);
4619 /* shift the data */
4621 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4622 data_end + size_diff, btrfs_leaf_data(leaf) +
4623 data_end, old_data_start + new_size - data_end);
4625 struct btrfs_disk_key disk_key;
4628 btrfs_item_key(leaf, &disk_key, slot);
4630 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4632 struct btrfs_file_extent_item *fi;
4634 fi = btrfs_item_ptr(leaf, slot,
4635 struct btrfs_file_extent_item);
4636 fi = (struct btrfs_file_extent_item *)(
4637 (unsigned long)fi - size_diff);
4639 if (btrfs_file_extent_type(leaf, fi) ==
4640 BTRFS_FILE_EXTENT_INLINE) {
4641 ptr = btrfs_item_ptr_offset(leaf, slot);
4642 memmove_extent_buffer(leaf, ptr,
4644 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4648 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4649 data_end + size_diff, btrfs_leaf_data(leaf) +
4650 data_end, old_data_start - data_end);
4652 offset = btrfs_disk_key_offset(&disk_key);
4653 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4654 btrfs_set_item_key(leaf, &disk_key, slot);
4656 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4659 item = btrfs_item_nr(slot);
4660 btrfs_set_item_size(leaf, item, new_size);
4661 btrfs_mark_buffer_dirty(leaf);
4663 if (btrfs_leaf_free_space(root, leaf) < 0) {
4664 btrfs_print_leaf(root, leaf);
4670 * make the item pointed to by the path bigger, data_size is the added size.
4672 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4676 struct extent_buffer *leaf;
4677 struct btrfs_item *item;
4679 unsigned int data_end;
4680 unsigned int old_data;
4681 unsigned int old_size;
4683 struct btrfs_map_token token;
4685 btrfs_init_map_token(&token);
4687 leaf = path->nodes[0];
4689 nritems = btrfs_header_nritems(leaf);
4690 data_end = leaf_data_end(root, leaf);
4692 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4693 btrfs_print_leaf(root, leaf);
4696 slot = path->slots[0];
4697 old_data = btrfs_item_end_nr(leaf, slot);
4700 if (slot >= nritems) {
4701 btrfs_print_leaf(root, leaf);
4702 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4708 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4710 /* first correct the data pointers */
4711 for (i = slot; i < nritems; i++) {
4713 item = btrfs_item_nr(i);
4715 ioff = btrfs_token_item_offset(leaf, item, &token);
4716 btrfs_set_token_item_offset(leaf, item,
4717 ioff - data_size, &token);
4720 /* shift the data */
4721 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4722 data_end - data_size, btrfs_leaf_data(leaf) +
4723 data_end, old_data - data_end);
4725 data_end = old_data;
4726 old_size = btrfs_item_size_nr(leaf, slot);
4727 item = btrfs_item_nr(slot);
4728 btrfs_set_item_size(leaf, item, old_size + data_size);
4729 btrfs_mark_buffer_dirty(leaf);
4731 if (btrfs_leaf_free_space(root, leaf) < 0) {
4732 btrfs_print_leaf(root, leaf);
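/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * growing an existing item in place. The caller reserves room through the
 * ins_len argument of btrfs_search_slot() and then appends "len" bytes with
 * btrfs_extend_item(). All names except the helpers above are hypothetical.
 */
#if 0
static int example_append_to_item(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key,
				  const void *data, u32 len)
{
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;

	/* cow = 1 and ins_len = len make room on the way down */
	ret = btrfs_search_slot(trans, root, key, path, len, 1);
	if (ret)		/* < 0 is an error, > 0 means the item is missing */
		return ret < 0 ? ret : -ENOENT;

	leaf = path->nodes[0];
	btrfs_extend_item(root, path, len);

	/* the new bytes live at the end of the (now larger) item */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]) +
	      btrfs_item_size_nr(leaf, path->slots[0]) - len;
	write_extent_buffer(leaf, data, ptr, len);
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif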
4738 * this is a helper for btrfs_insert_empty_items, the main goal here is
4739 * to save stack depth by doing the bulk of the work in a function
4740 * that doesn't call btrfs_search_slot
4742 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4743 struct btrfs_key *cpu_key, u32 *data_size,
4744 u32 total_data, u32 total_size, int nr)
4746 struct btrfs_item *item;
4749 unsigned int data_end;
4750 struct btrfs_disk_key disk_key;
4751 struct extent_buffer *leaf;
4753 struct btrfs_map_token token;
4755 if (path->slots[0] == 0) {
4756 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4757 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4759 btrfs_unlock_up_safe(path, 1);
4761 btrfs_init_map_token(&token);
4763 leaf = path->nodes[0];
4764 slot = path->slots[0];
4766 nritems = btrfs_header_nritems(leaf);
4767 data_end = leaf_data_end(root, leaf);
4769 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4770 btrfs_print_leaf(root, leaf);
4771 btrfs_crit(root->fs_info,
4772 "not enough freespace need %u have %d",
4773 total_size, btrfs_leaf_free_space(root, leaf));
4777 if (slot != nritems) {
4778 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4780 if (old_data < data_end) {
4781 btrfs_print_leaf(root, leaf);
4782 btrfs_crit(root->fs_info,
4783 "slot %d old_data %d data_end %d",
4784 slot, old_data, data_end);
4788 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4790 /* first correct the data pointers */
4791 for (i = slot; i < nritems; i++) {
4794 item = btrfs_item_nr(i);
4795 ioff = btrfs_token_item_offset(leaf, item, &token);
4796 btrfs_set_token_item_offset(leaf, item,
4797 ioff - total_data, &token);
4799 /* shift the items */
4800 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4801 btrfs_item_nr_offset(slot),
4802 (nritems - slot) * sizeof(struct btrfs_item));
4804 /* shift the data */
4805 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4806 data_end - total_data, btrfs_leaf_data(leaf) +
4807 data_end, old_data - data_end);
4808 data_end = old_data;
4811 /* setup the item for the new data */
4812 for (i = 0; i < nr; i++) {
4813 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4814 btrfs_set_item_key(leaf, &disk_key, slot + i);
4815 item = btrfs_item_nr(slot + i);
4816 btrfs_set_token_item_offset(leaf, item,
4817 data_end - data_size[i], &token);
4818 data_end -= data_size[i];
4819 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4822 btrfs_set_header_nritems(leaf, nritems + nr);
4823 btrfs_mark_buffer_dirty(leaf);
4825 if (btrfs_leaf_free_space(root, leaf) < 0) {
4826 btrfs_print_leaf(root, leaf);
4832 * Given a key and some data, insert items into the tree.
4833 * This does all the path init required, making room in the tree if needed.
4835 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4836 struct btrfs_root *root,
4837 struct btrfs_path *path,
4838 struct btrfs_key *cpu_key, u32 *data_size,
4847 for (i = 0; i < nr; i++)
4848 total_data += data_size[i];
4850 total_size = total_data + (nr * sizeof(struct btrfs_item));
4851 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4857 slot = path->slots[0];
4860 setup_items_for_insert(root, path, cpu_key, data_size,
4861 total_data, total_size, nr);
4865 /*
4866 * Given a key and some data, insert an item into the tree.
4867 * This does all the path init required, making room in the tree if needed.
4868 */
4869 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4870 *root, struct btrfs_key *cpu_key, void *data, u32
4871 data_size)
4872 {
4873 int ret = 0;
4874 struct btrfs_path *path;
4875 struct extent_buffer *leaf;
4876 unsigned long ptr;
4878 path = btrfs_alloc_path();
4879 if (!path)
4880 return -ENOMEM;
4881 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4882 if (!ret) {
4883 leaf = path->nodes[0];
4884 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4885 write_extent_buffer(leaf, data, ptr, data_size);
4886 btrfs_mark_buffer_dirty(leaf);
4887 }
4888 btrfs_free_path(path);
4889 return ret;
4890 }
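/*
 * Editor's illustrative sketch, not part of the original file: the typical
 * one-shot use of btrfs_insert_item() above.  The key values and payload are
 * hypothetical placeholders.
 */
static int example_insert_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 objectid, u8 type, u64 offset,
				   void *data, u32 len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;
	/* allocates and frees its own path, copies @len bytes of @data */
	return btrfs_insert_item(trans, root, &key, data, len);
}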
4892 /*
4893 * delete the pointer from a given node.
4894 *
4895 * the tree should have been previously balanced so the deletion does not
4896 * empty a node.
4897 */
4898 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4899 int level, int slot)
4900 {
4901 struct extent_buffer *parent = path->nodes[level];
4902 u32 nritems;
4903 int ret;
4905 nritems = btrfs_header_nritems(parent);
4906 if (slot != nritems - 1) {
4907 if (level)
4908 tree_mod_log_eb_move(root->fs_info, parent, slot,
4909 slot + 1, nritems - slot - 1);
4910 memmove_extent_buffer(parent,
4911 btrfs_node_key_ptr_offset(slot),
4912 btrfs_node_key_ptr_offset(slot + 1),
4913 sizeof(struct btrfs_key_ptr) *
4914 (nritems - slot - 1));
4915 } else if (level) {
4916 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4917 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4918 BUG_ON(ret < 0);
4919 }
4921 nritems--;
4922 btrfs_set_header_nritems(parent, nritems);
4923 if (nritems == 0 && parent == root->node) {
4924 BUG_ON(btrfs_header_level(root->node) != 1);
4925 /* just turn the root into a leaf and break */
4926 btrfs_set_header_level(root->node, 0);
4927 } else if (slot == 0) {
4928 struct btrfs_disk_key disk_key;
4930 btrfs_node_key(parent, &disk_key, 0);
4931 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4932 }
4933 btrfs_mark_buffer_dirty(parent);
4934 }
4936 /*
4937 * a helper function to delete the leaf pointed to by path->slots[1] and
4938 * path->nodes[1].
4939 *
4940 * This deletes the pointer in path->nodes[1] and frees the leaf
4941 * block extent. zero is returned if it all worked out, < 0 otherwise.
4942 *
4943 * The path must have already been setup for deleting the leaf, including
4944 * all the proper balancing. path->nodes[1] must be locked.
4945 */
4946 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4947 struct btrfs_root *root,
4948 struct btrfs_path *path,
4949 struct extent_buffer *leaf)
4950 {
4951 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4952 del_ptr(root, path, 1, path->slots[1]);
4954 /*
4955 * btrfs_free_extent is expensive, we want to make sure we
4956 * aren't holding any locks when we call it
4957 */
4958 btrfs_unlock_up_safe(path, 0);
4960 root_sub_used(root, leaf->len);
4962 extent_buffer_get(leaf);
4963 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4964 free_extent_buffer_stale(leaf);
4965 }
4966 /*
4967 * delete the item at the leaf level in path. If that empties
4968 * the leaf, remove it from the tree
4969 */
4970 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4971 struct btrfs_path *path, int slot, int nr)
4972 {
4973 struct extent_buffer *leaf;
4974 struct btrfs_item *item;
4975 u32 last_off;
4976 u32 dsize = 0;
4977 int ret = 0;
4978 int wret;
4979 int i;
4980 u32 nritems;
4981 struct btrfs_map_token token;
4983 btrfs_init_map_token(&token);
4985 leaf = path->nodes[0];
4986 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4988 for (i = 0; i < nr; i++)
4989 dsize += btrfs_item_size_nr(leaf, slot + i);
4991 nritems = btrfs_header_nritems(leaf);
4993 if (slot + nr != nritems) {
4994 int data_end = leaf_data_end(root, leaf);
4996 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4997 data_end + dsize,
4998 btrfs_leaf_data(leaf) + data_end,
4999 last_off - data_end);
5001 for (i = slot + nr; i < nritems; i++) {
5002 u32 ioff;
5004 item = btrfs_item_nr(i);
5005 ioff = btrfs_token_item_offset(leaf, item, &token);
5006 btrfs_set_token_item_offset(leaf, item,
5007 ioff + dsize, &token);
5008 }
5010 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
5011 btrfs_item_nr_offset(slot + nr),
5012 sizeof(struct btrfs_item) *
5013 (nritems - slot - nr));
5014 }
5015 btrfs_set_header_nritems(leaf, nritems - nr);
5016 nritems -= nr;
5018 /* delete the leaf if we've emptied it */
5019 if (nritems == 0) {
5020 if (leaf == root->node) {
5021 btrfs_set_header_level(leaf, 0);
5022 } else {
5023 btrfs_set_path_blocking(path);
5024 clean_tree_block(trans, root->fs_info, leaf);
5025 btrfs_del_leaf(trans, root, path, leaf);
5026 }
5027 } else {
5028 int used = leaf_space_used(leaf, 0, nritems);
5029 if (slot == 0) {
5030 struct btrfs_disk_key disk_key;
5032 btrfs_item_key(leaf, &disk_key, 0);
5033 fixup_low_keys(root->fs_info, path, &disk_key, 1);
5034 }
5036 /* delete the leaf if it is mostly empty */
5037 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5038 /* push_leaf_left fixes the path.
5039 * make sure the path still points to our leaf
5040 * for possible call to del_ptr below
5041 */
5042 slot = path->slots[1];
5043 extent_buffer_get(leaf);
5045 btrfs_set_path_blocking(path);
5046 wret = push_leaf_left(trans, root, path, 1, 1,
5047 1, (u32)-1);
5048 if (wret < 0 && wret != -ENOSPC)
5049 ret = wret;
5051 if (path->nodes[0] == leaf &&
5052 btrfs_header_nritems(leaf)) {
5053 wret = push_leaf_right(trans, root, path, 1,
5054 1, 1, 0);
5055 if (wret < 0 && wret != -ENOSPC)
5056 ret = wret;
5057 }
5059 if (btrfs_header_nritems(leaf) == 0) {
5060 path->slots[1] = slot;
5061 btrfs_del_leaf(trans, root, path, leaf);
5062 free_extent_buffer(leaf);
5063 ret = 0;
5064 } else {
5065 /* if we're still in the path, make sure
5066 * we're dirty. Otherwise, one of the
5067 * push_leaf functions must have already
5068 * dirtied this buffer
5069 */
5070 if (path->nodes[0] == leaf)
5071 btrfs_mark_buffer_dirty(leaf);
5072 free_extent_buffer(leaf);
5073 }
5074 } else {
5075 btrfs_mark_buffer_dirty(leaf);
5076 }
5077 }
5078 return ret;
5079 }
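/*
 * Editor's illustrative sketch, not part of the original file: deleting the
 * item a search just landed on with btrfs_del_items() above.  In-tree callers
 * usually spell this via the btrfs_del_item() wrapper; the function name here
 * is hypothetical.
 */
static int example_delete_found_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* ins_len -1 asks the search to rebalance in preparation for a delete */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;
	btrfs_free_path(path);
	return ret;
}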
5081 /*
5082 * search the tree again to find a leaf with lesser keys
5083 * returns 0 if it found something or 1 if there are no lesser leaves.
5084 * returns < 0 on io errors.
5085 *
5086 * This may release the path, and so you may lose any locks held at the
5087 * time you call it.
5088 */
5089 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5090 {
5091 struct btrfs_key key;
5092 struct btrfs_disk_key found_key;
5093 int ret;
5095 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5097 if (key.offset > 0) {
5098 key.offset--;
5099 } else if (key.type > 0) {
5100 key.type--;
5101 key.offset = (u64)-1;
5102 } else if (key.objectid > 0) {
5103 key.objectid--;
5104 key.type = (u8)-1;
5105 key.offset = (u64)-1;
5106 } else {
5107 return 1;
5108 }
5110 btrfs_release_path(path);
5111 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5112 if (ret < 0)
5113 return ret;
5114 btrfs_item_key(path->nodes[0], &found_key, 0);
5115 ret = comp_keys(&found_key, &key);
5116 /*
5117 * We might have had an item with the previous key in the tree right
5118 * before we released our path. And after we released our path, that
5119 * item might have been pushed to the first slot (0) of the leaf we
5120 * were holding due to a tree balance. Alternatively, an item with the
5121 * previous key can exist as the only element of a leaf (big fat item).
5122 * Therefore account for these 2 cases, so that our callers (like
5123 * btrfs_previous_item) don't miss an existing item with a key matching
5124 * the previous key we computed above.
5125 */
5126 if (ret <= 0)
5127 return 0;
5128 return 1;
5129 }
5131 /*
5132 * A helper function to walk down the tree starting at min_key, and looking
5133 * for nodes or leaves that have a minimum transaction id.
5134 * This is used by the btree defrag code, and tree logging
5134 * This is used by the btree defrag code, and tree logging
5136 * This does not cow, but it does stuff the starting key it finds back
5137 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5138 * key and get a writable path.
5140 * This does lock as it descends, and path->keep_locks should be set
5141 * to 1 by the caller.
5143 * This honors path->lowest_level to prevent descent past a given level
5144 * of the tree.
5146 * min_trans indicates the oldest transaction that you are interested
5147 * in walking through. Any nodes or leaves older than min_trans are
5148 * skipped over (without reading them).
5150 * returns zero if something useful was found, < 0 on error and 1 if there
5151 * was nothing in the tree that matched the search criteria.
5152 */
5153 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5154 struct btrfs_path *path,
5155 u64 min_trans)
5156 {
5157 struct extent_buffer *cur;
5158 struct btrfs_key found_key;
5159 int slot;
5160 int sret;
5161 u32 nritems;
5162 int level;
5163 int ret = 1;
5164 int keep_locks = path->keep_locks;
5166 path->keep_locks = 1;
5167 again:
5168 cur = btrfs_read_lock_root_node(root);
5169 level = btrfs_header_level(cur);
5170 WARN_ON(path->nodes[level]);
5171 path->nodes[level] = cur;
5172 path->locks[level] = BTRFS_READ_LOCK;
5174 if (btrfs_header_generation(cur) < min_trans) {
5175 ret = 1;
5176 goto out;
5177 }
5178 while (1) {
5179 nritems = btrfs_header_nritems(cur);
5180 level = btrfs_header_level(cur);
5181 sret = bin_search(cur, min_key, level, &slot);
5183 /* at the lowest level, we're done, setup the path and exit */
5184 if (level == path->lowest_level) {
5185 if (slot >= nritems)
5186 goto find_next_key;
5187 ret = 0;
5188 path->slots[level] = slot;
5189 btrfs_item_key_to_cpu(cur, &found_key, slot);
5190 goto out;
5191 }
5192 if (sret && slot > 0)
5193 slot--;
5194 /*
5195 * check this node pointer against the min_trans parameters.
5196 * If it is too old, skip to the next one.
5197 */
5198 while (slot < nritems) {
5199 u64 gen;
5201 gen = btrfs_node_ptr_generation(cur, slot);
5202 if (gen < min_trans) {
5203 slot++;
5204 continue;
5205 }
5206 break;
5207 }
5208 find_next_key:
5209 /*
5210 * we didn't find a candidate key in this node, walk forward
5211 * and find another one
5212 */
5213 if (slot >= nritems) {
5214 path->slots[level] = slot;
5215 btrfs_set_path_blocking(path);
5216 sret = btrfs_find_next_key(root, path, min_key, level,
5217 min_trans);
5218 if (sret == 0) {
5219 btrfs_release_path(path);
5220 goto again;
5221 } else {
5222 goto out;
5223 }
5224 }
5225 /* save our key for returning back */
5226 btrfs_node_key_to_cpu(cur, &found_key, slot);
5227 path->slots[level] = slot;
5228 if (level == path->lowest_level) {
5229 ret = 0;
5230 goto out;
5231 }
5232 btrfs_set_path_blocking(path);
5233 cur = read_node_slot(root, cur, slot);
5234 if (IS_ERR(cur)) {
5235 ret = PTR_ERR(cur);
5236 goto out;
5237 }
5239 btrfs_tree_read_lock(cur);
5241 path->locks[level - 1] = BTRFS_READ_LOCK;
5242 path->nodes[level - 1] = cur;
5243 unlock_up(path, level, 1, 0, NULL);
5244 btrfs_clear_path_blocking(path, NULL, 0);
5245 }
5246 out:
5247 path->keep_locks = keep_locks;
5248 if (ret == 0) {
5249 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5250 btrfs_set_path_blocking(path);
5251 memcpy(min_key, &found_key, sizeof(found_key));
5252 }
5253 return ret;
5254 }
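/*
 * Editor's illustrative sketch, not part of the original file: a typical scan
 * loop built on btrfs_search_forward() above, visiting only the parts of the
 * tree newer than @min_trans.  The function names are hypothetical and the
 * per-item processing is elided.
 */
static void example_key_inc(struct btrfs_key *key)
{
	/* advance a cpu key to the next possible key value, with carry */
	if (key->offset < (u64)-1) {
		key->offset++;
	} else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	}
}

static int example_scan_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	while (1) {
		ret = btrfs_search_forward(root, &key, path, min_trans);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* nothing newer is left */
			break;
		}
		/* ... process path->nodes[0] / path->slots[0] here ... */
		btrfs_release_path(path);
		if (key.objectid == (u64)-1 && key.type == (u8)-1 &&
		    key.offset == (u64)-1)
			break;
		/* continue strictly after the key we just visited */
		example_key_inc(&key);
	}
	btrfs_free_path(path);
	return ret;
}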
5256 static int tree_move_down(struct btrfs_root *root,
5257 struct btrfs_path *path,
5258 int *level, int root_level)
5259 {
5260 struct extent_buffer *eb;
5262 BUG_ON(*level == 0);
5263 eb = read_node_slot(root, path->nodes[*level], path->slots[*level]);
5264 if (IS_ERR(eb))
5265 return PTR_ERR(eb);
5267 path->nodes[*level - 1] = eb;
5268 path->slots[*level - 1] = 0;
5269 (*level)--;
5270 return 0;
5271 }
5273 static int tree_move_next_or_upnext(struct btrfs_root *root,
5274 struct btrfs_path *path,
5275 int *level, int root_level)
5276 {
5277 int ret = 0;
5278 int nritems;
5279 nritems = btrfs_header_nritems(path->nodes[*level]);
5281 path->slots[*level]++;
5283 while (path->slots[*level] >= nritems) {
5284 if (*level == root_level)
5285 return -1;
5287 /* move upnext */
5288 path->slots[*level] = 0;
5289 free_extent_buffer(path->nodes[*level]);
5290 path->nodes[*level] = NULL;
5291 (*level)++;
5292 path->slots[*level]++;
5294 nritems = btrfs_header_nritems(path->nodes[*level]);
5295 ret = 1;
5296 }
5297 return ret;
5298 }
5300 /*
5301 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5302 * or down.
5303 */
5304 static int tree_advance(struct btrfs_root *root,
5305 struct btrfs_path *path,
5306 int *level, int root_level,
5307 int allow_down,
5308 struct btrfs_key *key)
5309 {
5310 int ret;
5312 if (*level == 0 || !allow_down) {
5313 ret = tree_move_next_or_upnext(root, path, level, root_level);
5314 } else {
5315 ret = tree_move_down(root, path, level, root_level);
5316 }
5317 if (ret >= 0) {
5318 if (*level == 0)
5319 btrfs_item_key_to_cpu(path->nodes[*level], key,
5320 path->slots[*level]);
5321 else
5322 btrfs_node_key_to_cpu(path->nodes[*level], key,
5323 path->slots[*level]);
5324 }
5325 return ret;
5326 }
5328 static int tree_compare_item(struct btrfs_root *left_root,
5329 struct btrfs_path *left_path,
5330 struct btrfs_path *right_path,
5331 char *tmp_buf)
5332 {
5333 int cmp;
5334 int len1, len2;
5335 unsigned long off1, off2;
5337 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5338 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5339 if (len1 != len2)
5340 return 1;
5342 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5343 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5344 right_path->slots[0]);
5346 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5348 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5349 if (cmp)
5350 return 1;
5351 return 0;
5352 }
5354 #define ADVANCE 1
5355 #define ADVANCE_ONLY_NEXT -1
5357 /*
5358 * This function compares two trees and calls the provided callback for
5359 * every changed/new/deleted item it finds.
5360 * If shared tree blocks are encountered, whole subtrees are skipped, making
5361 * the compare pretty fast on snapshotted subvolumes.
5363 * This currently works on commit roots only. As commit roots are read only,
5364 * we don't do any locking. The commit roots are protected with transactions.
5365 * Transactions are ended and rejoined when a commit is tried in between.
5367 * This function checks for modifications done to the trees while comparing.
5368 * If it detects a change, it aborts immediately.
5369 */
5370 int btrfs_compare_trees(struct btrfs_root *left_root,
5371 struct btrfs_root *right_root,
5372 btrfs_changed_cb_t changed_cb, void *ctx)
5373 {
5374 int ret;
5375 int cmp;
5376 struct btrfs_path *left_path = NULL;
5377 struct btrfs_path *right_path = NULL;
5378 struct btrfs_key left_key;
5379 struct btrfs_key right_key;
5380 char *tmp_buf = NULL;
5381 int left_root_level;
5382 int right_root_level;
5383 int left_level;
5384 int right_level;
5385 int left_end_reached;
5386 int right_end_reached;
5387 int advance_left;
5388 int advance_right;
5389 u64 left_blockptr;
5390 u64 right_blockptr;
5391 u64 left_gen;
5392 u64 right_gen;
5394 left_path = btrfs_alloc_path();
5395 if (!left_path) {
5396 ret = -ENOMEM;
5397 goto out;
5398 }
5399 right_path = btrfs_alloc_path();
5400 if (!right_path) {
5401 ret = -ENOMEM;
5402 goto out;
5403 }
5405 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
5406 if (!tmp_buf) {
5407 tmp_buf = vmalloc(left_root->nodesize);
5408 if (!tmp_buf) {
5409 ret = -ENOMEM;
5410 goto out;
5411 }
5412 }
5414 left_path->search_commit_root = 1;
5415 left_path->skip_locking = 1;
5416 right_path->search_commit_root = 1;
5417 right_path->skip_locking = 1;
5419 /*
5420 * Strategy: Go to the first items of both trees. Then do
5421 *
5422 * If both trees are at level 0
5423 * Compare keys of current items
5424 * If left < right treat left item as new, advance left tree
5425 * and repeat
5426 * If left > right treat right item as deleted, advance right tree
5427 * and repeat
5428 * If left == right do deep compare of items, treat as changed if
5429 * needed, advance both trees and repeat
5430 * If both trees are at the same level but not at level 0
5431 * Compare keys of current nodes/leaves
5432 * If left < right advance left tree and repeat
5433 * If left > right advance right tree and repeat
5434 * If left == right compare blockptrs of the next nodes/leaves
5435 * If they match advance both trees but stay at the same level
5436 * and repeat
5437 * If they don't match advance both trees while allowing to go
5438 * deeper and repeat
5439 * If tree levels are different
5440 * Advance the tree that needs it and repeat
5441 *
5442 * Advancing a tree means:
5443 * If we are at level 0, try to go to the next slot. If that's not
5444 * possible, go one level up and repeat. Stop when we found a level
5445 * where we could go to the next slot. We may at this point be on a
5446 * node or a leaf.
5447 *
5448 * If we are not at level 0 and not on shared tree blocks, go one
5449 * slot to the right if possible or go up and right.
5450 *
5451 * If we are not at level 0 and on shared tree blocks, go one slot to
5452 * the right if possible or go up and right.
5453 */
5455 down_read(&left_root->fs_info->commit_root_sem);
5456 left_level = btrfs_header_level(left_root->commit_root);
5457 left_root_level = left_level;
5458 left_path->nodes[left_level] = left_root->commit_root;
5459 extent_buffer_get(left_path->nodes[left_level]);
5461 right_level = btrfs_header_level(right_root->commit_root);
5462 right_root_level = right_level;
5463 right_path->nodes[right_level] = right_root->commit_root;
5464 extent_buffer_get(right_path->nodes[right_level]);
5465 up_read(&left_root->fs_info->commit_root_sem);
5467 if (left_level == 0)
5468 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5469 &left_key, left_path->slots[left_level]);
5470 else
5471 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5472 &left_key, left_path->slots[left_level]);
5473 if (right_level == 0)
5474 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5475 &right_key, right_path->slots[right_level]);
5476 else
5477 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5478 &right_key, right_path->slots[right_level]);
5480 left_end_reached = right_end_reached = 0;
5481 advance_left = advance_right = 0;
5483 while (1) {
5484 if (advance_left && !left_end_reached) {
5485 ret = tree_advance(left_root, left_path, &left_level,
5486 left_root_level,
5487 advance_left != ADVANCE_ONLY_NEXT,
5488 &left_key);
5489 if (ret == -1)
5490 left_end_reached = ADVANCE;
5491 else if (ret < 0)
5492 goto out;
5493 advance_left = 0;
5494 }
5495 if (advance_right && !right_end_reached) {
5496 ret = tree_advance(right_root, right_path, &right_level,
5497 right_root_level,
5498 advance_right != ADVANCE_ONLY_NEXT,
5499 &right_key);
5500 if (ret == -1)
5501 right_end_reached = ADVANCE;
5502 else if (ret < 0)
5503 goto out;
5504 advance_right = 0;
5505 }
5507 if (left_end_reached && right_end_reached) {
5508 ret = 0;
5509 goto out;
5510 } else if (left_end_reached) {
5511 if (right_level == 0) {
5512 ret = changed_cb(left_root, right_root,
5513 left_path, right_path,
5514 &right_key,
5515 BTRFS_COMPARE_TREE_DELETED,
5516 ctx);
5517 if (ret < 0)
5518 goto out;
5519 }
5520 advance_right = ADVANCE;
5521 continue;
5522 } else if (right_end_reached) {
5523 if (left_level == 0) {
5524 ret = changed_cb(left_root, right_root,
5525 left_path, right_path,
5526 &left_key,
5527 BTRFS_COMPARE_TREE_NEW,
5528 ctx);
5529 if (ret < 0)
5530 goto out;
5531 }
5532 advance_left = ADVANCE;
5533 continue;
5534 }
5536 if (left_level == 0 && right_level == 0) {
5537 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5538 if (cmp < 0) {
5539 ret = changed_cb(left_root, right_root,
5540 left_path, right_path,
5541 &left_key,
5542 BTRFS_COMPARE_TREE_NEW,
5543 ctx);
5544 if (ret < 0)
5545 goto out;
5546 advance_left = ADVANCE;
5547 } else if (cmp > 0) {
5548 ret = changed_cb(left_root, right_root,
5549 left_path, right_path,
5550 &right_key,
5551 BTRFS_COMPARE_TREE_DELETED,
5552 ctx);
5553 if (ret < 0)
5554 goto out;
5555 advance_right = ADVANCE;
5556 } else {
5557 enum btrfs_compare_tree_result result;
5559 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5560 ret = tree_compare_item(left_root, left_path,
5561 right_path, tmp_buf);
5562 if (ret)
5563 result = BTRFS_COMPARE_TREE_CHANGED;
5564 else
5565 result = BTRFS_COMPARE_TREE_SAME;
5566 ret = changed_cb(left_root, right_root,
5567 left_path, right_path,
5568 &left_key, result, ctx);
5569 if (ret < 0)
5570 goto out;
5571 advance_left = ADVANCE;
5572 advance_right = ADVANCE;
5573 }
5574 } else if (left_level == right_level) {
5575 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5576 if (cmp < 0) {
5577 advance_left = ADVANCE;
5578 } else if (cmp > 0) {
5579 advance_right = ADVANCE;
5580 } else {
5581 left_blockptr = btrfs_node_blockptr(
5582 left_path->nodes[left_level],
5583 left_path->slots[left_level]);
5584 right_blockptr = btrfs_node_blockptr(
5585 right_path->nodes[right_level],
5586 right_path->slots[right_level]);
5587 left_gen = btrfs_node_ptr_generation(
5588 left_path->nodes[left_level],
5589 left_path->slots[left_level]);
5590 right_gen = btrfs_node_ptr_generation(
5591 right_path->nodes[right_level],
5592 right_path->slots[right_level]);
5593 if (left_blockptr == right_blockptr &&
5594 left_gen == right_gen) {
5595 /*
5596 * As we're on a shared block, don't
5597 * allow to go deeper.
5598 */
5599 advance_left = ADVANCE_ONLY_NEXT;
5600 advance_right = ADVANCE_ONLY_NEXT;
5601 } else {
5602 advance_left = ADVANCE;
5603 advance_right = ADVANCE;
5604 }
5605 }
5606 } else if (left_level < right_level) {
5607 advance_right = ADVANCE;
5608 } else {
5609 advance_left = ADVANCE;
5610 }
5611 }
5613 out:
5614 btrfs_free_path(left_path);
5615 btrfs_free_path(right_path);
5616 kvfree(tmp_buf);
5617 return ret;
5618 }
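/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * btrfs_changed_cb_t callback for btrfs_compare_trees() above.  The argument
 * order mirrors the changed_cb() invocations in the function; the stats
 * structure and function names are hypothetical.
 */
struct example_diff_stats {
	u64 new_items;
	u64 deleted;
	u64 changed;
};

static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_diff_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->deleted++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->changed++;
		break;
	default:	/* BTRFS_COMPARE_TREE_SAME: nothing to count */
		break;
	}
	return 0;	/* a negative return value aborts the compare */
}
/*
 * A caller would then pass example_changed_cb and a zeroed
 * struct example_diff_stats as ctx to btrfs_compare_trees().
 */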
5620 /*
5621 * this is similar to btrfs_next_leaf, but does not try to preserve
5622 * and fixup the path. It looks for and returns the next key in the
5623 * tree based on the current path and the min_trans parameters.
5625 * 0 is returned if another key is found, < 0 if there are any errors
5626 * and 1 is returned if there are no higher keys in the tree
5628 * path->keep_locks should be set to 1 on the search made before
5629 * calling this function.
5630 */
5631 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5632 struct btrfs_key *key, int level, u64 min_trans)
5633 {
5634 int slot;
5635 struct extent_buffer *c;
5637 WARN_ON(!path->keep_locks);
5638 while (level < BTRFS_MAX_LEVEL) {
5639 if (!path->nodes[level])
5640 return 1;
5642 slot = path->slots[level] + 1;
5643 c = path->nodes[level];
5644 next:
5645 if (slot >= btrfs_header_nritems(c)) {
5646 int ret;
5647 int orig_lowest;
5648 struct btrfs_key cur_key;
5649 if (level + 1 >= BTRFS_MAX_LEVEL ||
5650 !path->nodes[level + 1])
5651 return 1;
5653 if (path->locks[level + 1]) {
5654 level++;
5655 continue;
5656 }
5658 slot = btrfs_header_nritems(c) - 1;
5659 if (level == 0)
5660 btrfs_item_key_to_cpu(c, &cur_key, slot);
5661 else
5662 btrfs_node_key_to_cpu(c, &cur_key, slot);
5664 orig_lowest = path->lowest_level;
5665 btrfs_release_path(path);
5666 path->lowest_level = level;
5667 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5668 0, 0);
5669 path->lowest_level = orig_lowest;
5670 if (ret < 0)
5671 return ret;
5673 c = path->nodes[level];
5674 slot = path->slots[level];
5675 if (ret == 0)
5676 slot++;
5677 goto next;
5678 }
5680 if (level == 0)
5681 btrfs_item_key_to_cpu(c, key, slot);
5682 else {
5683 u64 gen = btrfs_node_ptr_generation(c, slot);
5685 if (gen < min_trans) {
5686 slot++;
5687 goto next;
5688 }
5689 btrfs_node_key_to_cpu(c, key, slot);
5690 }
5691 return 0;
5692 }
5693 return 1;
5694 }
5696 /*
5697 * search the tree again to find a leaf with greater keys
5698 * returns 0 if it found something or 1 if there are no greater leaves.
5699 * returns < 0 on io errors.
5700 */
5701 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5702 {
5703 return btrfs_next_old_leaf(root, path, 0);
5704 }
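/*
 * Editor's illustrative sketch, not part of the original file: the canonical
 * way to iterate every item of a tree, positioning the path with
 * btrfs_search_slot() and hopping to the following leaf with btrfs_next_leaf()
 * above when the current one is exhausted.  The function name is hypothetical
 * and the per-item work is elided.
 */
static int example_walk_all_items(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item at (leaf, path->slots[0]) ... */
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}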
5706 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5707 u64 time_seq)
5708 {
5709 int slot;
5710 int level;
5711 struct extent_buffer *c;
5712 struct extent_buffer *next;
5713 struct btrfs_key key;
5714 u32 nritems;
5715 int ret;
5716 int old_spinning = path->leave_spinning;
5717 int next_rw_lock = 0;
5719 nritems = btrfs_header_nritems(path->nodes[0]);
5720 if (nritems == 0)
5721 return 1;
5723 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5724 again:
5725 level = 1;
5726 next = NULL;
5727 next_rw_lock = 0;
5728 btrfs_release_path(path);
5730 path->keep_locks = 1;
5731 path->leave_spinning = 1;
5733 if (time_seq)
5734 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5735 else
5736 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5737 path->keep_locks = 0;
5738 if (ret < 0)
5739 return ret;
5742 nritems = btrfs_header_nritems(path->nodes[0]);
5743 /*
5744 * by releasing the path above we dropped all our locks. A balance
5745 * could have added more items next to the key that used to be
5746 * at the very end of the block. So, check again here and
5747 * advance the path if there are now more items available.
5748 */
5749 if (nritems > 0 && path->slots[0] < nritems - 1) {
5750 if (ret == 0)
5751 path->slots[0]++;
5752 ret = 0;
5753 goto done;
5754 }
5755 /*
5756 * So the above check misses one case:
5757 * - after releasing the path above, someone has removed the item that
5758 * used to be at the very end of the block, and balance between leafs
5759 * gets another one with bigger key.offset to replace it.
5761 * This one should be returned as well, or we can get leaf corruption
5762 * later(esp. in __btrfs_drop_extents()).
5764 * And a bit more explanation about this check,
5765 * with ret > 0, the key isn't found, the path points to the slot
5766 * where it should be inserted, so the path->slots[0] item must be the
5767 * bigger one.
5768 */
5769 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5770 ret = 0;
5771 goto done;
5772 }
5774 while (level < BTRFS_MAX_LEVEL) {
5775 if (!path->nodes[level]) {
5776 ret = 1;
5777 goto done;
5778 }
5780 slot = path->slots[level] + 1;
5781 c = path->nodes[level];
5782 if (slot >= btrfs_header_nritems(c)) {
5783 level++;
5784 if (level == BTRFS_MAX_LEVEL) {
5785 ret = 1;
5786 goto done;
5787 }
5788 continue;
5789 }
5791 if (next) {
5792 btrfs_tree_unlock_rw(next, next_rw_lock);
5793 free_extent_buffer(next);
5794 }
5796 next_rw_lock = path->locks[level];
5798 ret = read_block_for_search(NULL, root, path, &next, level,
5799 slot, &key, 0);
5800 if (ret == -EAGAIN)
5801 goto again;
5803 if (ret < 0) {
5804 btrfs_release_path(path);
5805 goto done;
5806 }
5808 if (!path->skip_locking) {
5809 ret = btrfs_try_tree_read_lock(next);
5810 if (!ret && time_seq) {
5812 * If we don't get the lock, we may be racing
5813 * with push_leaf_left, holding that lock while
5814 * itself waiting for the leaf we've currently
5815 * locked. To solve this situation, we give up
5816 * on our lock and cycle.
5817 */
5818 free_extent_buffer(next);
5819 btrfs_release_path(path);
5820 cond_resched();
5821 goto again;
5822 }
5823 if (!ret) {
5824 btrfs_set_path_blocking(path);
5825 btrfs_tree_read_lock(next);
5826 btrfs_clear_path_blocking(path, next,
5827 BTRFS_READ_LOCK);
5828 }
5829 next_rw_lock = BTRFS_READ_LOCK;
5830 }
5831 break;
5832 }
5833 path->slots[level] = slot;
5834 while (1) {
5835 level--;
5836 c = path->nodes[level];
5837 if (path->locks[level])
5838 btrfs_tree_unlock_rw(c, path->locks[level]);
5840 free_extent_buffer(c);
5841 path->nodes[level] = next;
5842 path->slots[level] = 0;
5843 if (!path->skip_locking)
5844 path->locks[level] = next_rw_lock;
5845 if (!level)
5846 break;
5848 ret = read_block_for_search(NULL, root, path, &next, level,
5849 0, &key, 0);
5850 if (ret == -EAGAIN)
5851 goto again;
5853 if (ret < 0) {
5854 btrfs_release_path(path);
5855 goto done;
5856 }
5858 if (!path->skip_locking) {
5859 ret = btrfs_try_tree_read_lock(next);
5860 if (!ret) {
5861 btrfs_set_path_blocking(path);
5862 btrfs_tree_read_lock(next);
5863 btrfs_clear_path_blocking(path, next,
5864 BTRFS_READ_LOCK);
5865 }
5866 next_rw_lock = BTRFS_READ_LOCK;
5867 }
5868 }
5869 ret = 0;
5870 done:
5871 unlock_up(path, 0, 1, 0, NULL);
5872 path->leave_spinning = old_spinning;
5873 if (!old_spinning)
5874 btrfs_set_path_blocking(path);
5876 return ret;
5877 }
5879 /*
5880 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5881 * searching until it gets past min_objectid or finds an item of 'type'
5882 *
5883 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5884 */
5885 int btrfs_previous_item(struct btrfs_root *root,
5886 struct btrfs_path *path, u64 min_objectid,
5887 int type)
5888 {
5889 struct btrfs_key found_key;
5890 struct extent_buffer *leaf;
5891 u32 nritems;
5892 int ret;
5894 while (1) {
5895 if (path->slots[0] == 0) {
5896 btrfs_set_path_blocking(path);
5897 ret = btrfs_prev_leaf(root, path);
5898 if (ret != 0)
5899 return ret;
5900 } else {
5901 path->slots[0]--;
5902 }
5903 leaf = path->nodes[0];
5904 nritems = btrfs_header_nritems(leaf);
5905 if (nritems == 0)
5906 return 1;
5907 if (path->slots[0] == nritems)
5908 path->slots[0]--;
5910 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5911 if (found_key.objectid < min_objectid)
5912 break;
5913 if (found_key.type == type)
5914 return 0;
5915 if (found_key.objectid == min_objectid &&
5916 found_key.type < type)
5917 break;
5918 }
5919 return 1;
5920 }
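/*
 * Editor's illustrative sketch, not part of the original file: positioning a
 * path just past an object and stepping backwards with btrfs_previous_item()
 * above to find the last item of a given type for that objectid.  The
 * function name is hypothetical.
 */
static int example_find_last_item_of_type(struct btrfs_root *root,
					  u64 objectid, int type,
					  struct btrfs_key *found)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start just past everything belonging to @objectid */
	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* step back until we hit @type or leave @objectid */
	ret = btrfs_previous_item(root, path, objectid, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
out:
	btrfs_free_path(path);
	return ret;
}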
5922 /*
5923 * search in extent tree to find a previous Metadata/Data extent item with
5924 * min objectid.
5925 *
5926 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5927 */
5928 int btrfs_previous_extent_item(struct btrfs_root *root,
5929 struct btrfs_path *path, u64 min_objectid)
5930 {
5931 struct btrfs_key found_key;
5932 struct extent_buffer *leaf;
5933 u32 nritems;
5934 int ret;
5936 while (1) {
5937 if (path->slots[0] == 0) {
5938 btrfs_set_path_blocking(path);
5939 ret = btrfs_prev_leaf(root, path);
5940 if (ret != 0)
5941 return ret;
5942 } else {
5943 path->slots[0]--;
5944 }
5945 leaf = path->nodes[0];
5946 nritems = btrfs_header_nritems(leaf);
5947 if (nritems == 0)
5948 return 1;
5949 if (path->slots[0] == nritems)
5950 path->slots[0]--;
5952 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5953 if (found_key.objectid < min_objectid)
5954 break;
5955 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5956 found_key.type == BTRFS_METADATA_ITEM_KEY)
5957 return 0;
5958 if (found_key.objectid == min_objectid &&
5959 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5960 break;
5961 }
5962 return 1;
5963 }