// SPDX-License-Identifier: GPL-2.0
 * Copyright (C) 2011 STRATO. All rights reserved.
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "transaction.h"
#include "delayed-ref.h"
#include "tree-mod-log.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	struct extent_inode_elem *next;

static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie,
	struct extent_inode_elem *e;

	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
		offset = extent_item_pos - data_offset;

	e = kmalloc(sizeof(*e), GFP_NOFS);
	e->inum = key->objectid;
	e->offset = key->offset + offset;

static void free_inode_elem_list(struct extent_inode_elem *eie)
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {

static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
	struct btrfs_file_extent_item *fi;

	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);

	struct rb_root_cached root;

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
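
/*
 * Illustrative initialization of the three trees (a sketch mirroring
 * what find_parent_nodes() below does, not an additional API):
 *
 *	struct preftrees preftrees = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT
 *	};
 */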
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * - incremented when a ref->count transitions to >0
 * - decremented when a ref->count transitions to <1

static inline int extent_is_shared(struct share_check *sc)
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
	if (!btrfs_prelim_ref_cache)

void __cold btrfs_prelim_ref_exit(void)
	kmem_cache_destroy(btrfs_prelim_ref_cache);

static void free_pref(struct prelim_ref *ref)
	kmem_cache_free(btrfs_prelim_ref_cache, ref);

 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
	if (ref1->level < ref2->level)
	if (ref1->level > ref2->level)
	if (ref1->root_id < ref2->root_id)
	if (ref1->root_id > ref2->root_id)
	if (ref1->key_for_search.type < ref2->key_for_search.type)
	if (ref1->key_for_search.type > ref2->key_for_search.type)
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
	if (ref1->parent < ref2->parent)
	if (ref1->parent > ref2->parent)

static void update_share_count(struct share_check *sc, int oldcount,
	if ((!sc) || (oldcount == 0 && newcount < 1))

	if (oldcount > 0 && newcount < 1)
	else if (oldcount < 1 && newcount > 0)
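
/*
 * Worked example (sketch): inserting a brand-new ref calls
 * update_share_count(sc, 0, newref->count) from prelim_ref_insert()
 * below; with newcount == 1 the "oldcount < 1 && newcount > 0" branch
 * increments sc->share_count. A later merge that takes the same ref
 * from count 1 to count 0 hits the "oldcount > 0 && newcount < 1"
 * branch and decrements it again.
 */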
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
	struct rb_root_cached *root;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		} else if (result > 0) {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				ref->inode_list = newref->inode_list;
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;

	update_share_count(sc, 0, newref->count);
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
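
/*
 * Merge example (sketch): two refs that compare identical via
 * prelim_ref_compare(), one with count +1 (an on-disk ref) and one
 * with count -1 (a BTRFS_DROP_DELAYED_REF), collapse into a single
 * node with ref->count == 0, which later stages treat as effectively
 * deleted.
 */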
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
static void prelim_release(struct preftree *preftree)
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)

	preftree->root = RB_ROOT_CACHED;

 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	ref->root_id = root_id;
		ref->key_for_search = *key;
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
	ref->inode_list = NULL;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
	struct preftree *tree = &preftrees->indirect;

		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};

	target.parent = bytenr;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 wanted_disk_byte = ref->wanted_disk_byte;

	eb = path->nodes[level];
	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);

	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 * 2. We are searching for a normal backref but the bytenr of this leaf
	 *    matches a shared data backref
	 * 3. The leaf owner is not equal to the root we are searching for
	 *
	 * For these cases, go to the next leaf before we continue.
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
			ret = btrfs_next_old_leaf(root, path, time_seq);

	while (!ret && count < ref->count) {
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)

		 * We are searching for a normal backref but the bytenr of this
		 * leaf matches a shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
				ret = btrfs_next_old_leaf(root, path, time_seq);
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			if (ref->key_for_search.offset == key.offset - data_offset)
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
							 &eie, ignore_offset);
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (!ret && extent_item_pos) {
		if (time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
			ret = btrfs_next_old_item(root, path, time_seq);

	free_inode_elem_list(eie);

 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	 * If we're searching the commit root we could possibly be holding
	 * locks on other tree nodes. This happens when the qgroup code does
	 * backref walks while adding new delayed refs. To deal with this we
	 * need to look in cache for the root, and if we don't find it then we
	 * need to search the tree_root's commit root, thus the
	 * btrfs_get_fs_root_commit_root usage
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {

	if (btrfs_is_testing(fs_info)) {

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)

	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting the data offset of the corresponding extent data item
	 * from a file's offset. This can happen for example in the
	 *
	 * So if we detect such a case we set the search key's offset to zero
	 * to make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
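	 *
	 * Worked example (illustrative numbers for the clone case described
	 * above): a file extent item at file offset 0 whose extent data
	 * offset is 8M yields a backref offset of 0 - 8M, which wraps around
	 * as a u64 to a value >= LLONG_MAX, so a search keyed on that offset
	 * could never match the file extent item at offset 0.
	 */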
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

		   "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		   ref->root_id, level, ref->count, ret,
		   ref->key_for_search.objectid, ref->key_for_search.type,
		   ref->key_for_search.offset);

	eb = path->nodes[level];
		if (WARN_ON(!level)) {
		eb = path->nodes[level];

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
	btrfs_put_root(root);
	path->lowest_level = 0;
	btrfs_release_path(path);

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
	return (struct extent_inode_elem *)(uintptr_t)node->aux;

 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree merges on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);

	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			ret = BACKREF_FOUND_SHARED;

		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
		 * we can only tolerate ENOENT; otherwise, we should catch the
		 * error and return directly.
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref for each additional parent. */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,

		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);

 * read tree blocks and add keys where required.
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent); /* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte,
				     ref->root_id, 0, ref->level - 1, NULL);
		if (!extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);

 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to that seq to the list
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;

		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);
			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			 * Found an inum that doesn't match our known inum, we
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);
			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,

		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		if (ret && (ret != BACKREF_FOUND_SHARED))

	ret = extent_is_shared(sc);
	spin_unlock(&head->lock);
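
/*
 * Sign convention used above (sketch): BTRFS_ADD_DELAYED_REF contributes
 * count = +ref_mod and BTRFS_DROP_DELAYED_REF contributes count = -ref_mod,
 * so when prelim_ref_insert() merges a delayed ref with its on-disk
 * counterpart, the resulting ref->count reflects the extent's reference
 * state as if the pending delayed refs had already been run.
 */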
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_extent_item *ei;

	 * enumerate all inline refs
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));

		struct btrfs_extent_inline_ref *iref;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
		if (type == BTRFS_REF_TYPE_INVALID)

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
		ptr += btrfs_extent_inline_ref_size(type);
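
/*
 * On-disk layout walked by the loop above (sketch; a skinny
 * METADATA_ITEM_KEY item omits the btrfs_tree_block_info, and a data
 * extent never has one):
 *
 *	struct btrfs_extent_item
 *	struct btrfs_tree_block_info
 *	struct btrfs_extent_inline_ref	(repeated until the item ends)
 */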
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
static int add_keyed_refs(struct btrfs_root *extent_root,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_key key;

		ret = btrfs_next_item(extent_root, path);

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)

		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,

 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and
 * behaves much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned. If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
		path->search_commit_root = 1;
		path->skip_locking = 1;
	if (time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* This shouldn't happen, indicates a bug or fs corruption. */

	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != BTRFS_SEQ_LAST) {
		 * We have a specific time_seq we care about and trans which
		 * means we have the path lock, we need to grab the ref head and
		 * lock it so we have a consistent view of the refs at the given
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				 * Mutex was contended, block until it's
				 * released and try again
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
			mutex_unlock(&head->mutex);
			spin_unlock(&delayed_refs->lock);

	if (path->slots[0]) {
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			ret = add_keyed_refs(root, path, bytenr, info_level,

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	node = rb_first_cached(&preftrees.direct.root);
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);

		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     0, ref->level, NULL);
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				ref->inode_list = eie;
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  (void **)&eie, GFP_NOFS);
			if (!ret && extent_item_pos) {
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				eie->next = ref->inode_list;

	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	free_inode_elem_list(eie);

static void free_leaf_list(struct ulist *blocks)
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);

 * Finds all leaves with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leaves will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
	*leafs = ulist_alloc(GFP_NOFS);

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);

 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;

	tmp = ulist_alloc(GFP_NOFS);
	*roots = ulist_alloc(GFP_NOFS);

	ULIST_ITER_INIT(&uiter);
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
		node = ulist_next(tmp, &uiter);

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool skip_commit_root_sem)
	if (!trans && !skip_commit_root_sem)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, false);
	if (!trans && !skip_commit_root_sem)
		up_read(&fs_info->commit_root_sem);
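
/*
 * Illustrative caller (a sketch; the qgroup code does the equivalent,
 * and use_root() stands for any hypothetical consumer):
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, seq, &roots, false);
 *	if (!ret) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((node = ulist_next(roots, &uiter)))
 *			use_root(node->val);	(each val is a referencing root id)
 *	}
 *	ulist_free(roots);
 */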
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's
 * last snapshot field changing while updating or checking the cache.
static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
	struct btrfs_backref_shared_cache_entry *entry;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))

	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	entry = &cache->entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)

	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))

	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(root->fs_info))

	*is_shared = entry->is_shared;

 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's
 * last snapshot field changing while updating or checking the cache.
static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
	struct btrfs_backref_shared_cache_entry *entry;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))

	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
		gen = btrfs_get_last_root_drop_gen(root->fs_info);
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &cache->entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;

	 * If we found that an extent buffer is shared, set the cache result
	 * for all extent buffers below it to true. As nodes in the path are
	 * COWed, their sharedness moves to their children, and once a leaf is
	 * COWed the sharedness of a data extent becomes direct: the refcount
	 * of the data extent is increased in the extent item in the extent
	 * tree.
	for (int i = 0; i < level; i++) {
		entry = &cache->entries[i];
		entry->is_shared = is_shared;

 * Check if a data extent is shared or not.
 *
 * @root:        The root the inode belongs to.
 * @inum:        Number of the inode whose extent we are checking.
 * @bytenr:      Logical bytenr of the extent we are checking.
 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
 * @roots:       List of roots this extent is shared among.
 * @tmp:         Temporary list used for iteration.
 * @cache:       A backref lookup result cache.
 *
 * btrfs_is_data_extent_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
				struct ulist *roots, struct ulist *tmp,
				struct btrfs_backref_shared_cache *cache)
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
		down_read(&fs_info->commit_root_sem);
		btrfs_get_tree_mod_seq(fs_info, &elem);

	/* -1 means we are in the bytenr of the data extent. */
	ULIST_ITER_INIT(&uiter);
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			store_backref_shared_cache(cache, root, bytenr,
		if (ret < 0 && ret != -ENOENT)

		 * If our data extent is not shared through reflinks and it was
		 * created in a generation after the last one used to create a
		 * snapshot of the inode's root, then it can not be shared
		 * indirectly through subtrees, as that can only happen with
		 * snapshots. In this case bail out, no need to check for the
		 * sharedness of extent buffers.
		    extent_gen > btrfs_root_last_snapshot(&root->root_item))

			store_backref_shared_cache(cache, root, bytenr,
		node = ulist_next(tmp, &uiter);

		cached = lookup_backref_shared_cache(cache, root, bytenr, level,
			ret = (is_shared ? 1 : 0);

		shared.share_count = 0;

		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
		up_read(&fs_info->commit_root_sem);

	ulist_release(roots);
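
/*
 * Typical use (a sketch modelled on fiemap; assumes the caller allocated
 * @roots, @tmp and @cache once and reuses them across consecutive extents
 * so the per-level cache can short-circuit repeated walks):
 *
 *	ret = btrfs_is_data_extent_shared(root, btrfs_ino(inode), disk_bytenr,
 *					  extent_gen, roots, tmp, cache);
 *	if (ret < 0)
 *		goto out;
 *	if (ret)
 *		flags |= FIEMAP_EXTENT_SHARED;
 */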
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			ret = btrfs_next_leaf(root, path);

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have a different objectid
		 * or type then there are no more to be found in the tree and
		 * we can exit.
		if (found_key.objectid != inode_objectid)
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)

		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
			*found_off = found_key.offset;

 * This iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. The path is only given within the current file system.
 * Therefore, it never starts with a '/'. The caller is responsible to provide
 * "size" bytes in "dest". The dest buffer will be filled backwards. Finally,
 * the start point of the resulting string is returned. This pointer is within
 *
 * In case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. That way, the caller can determine how much space would be
 * required for the path to fit into the buffer. In that case, the returned
 * value will be smaller than dest. Callers must check this!
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
			if (!path->skip_locking)
				btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
		ret = btrfs_find_item(fs_root, path, parent, 0,
				      BTRFS_INODE_REF_KEY, &found_key);

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
			path->nodes[0] = NULL;
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		if (bytes_left >= 0)
			dest[bytes_left] = '/';

	btrfs_release_path(path);
		return ERR_PTR(ret);

	return dest + bytes_left;
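
/*
 * Worked example (sketch): resolving inode "c" under directory "b" under
 * "a" with size == 8 fills dest back to front as "a/b/c\0" and returns
 * dest + 2. With size == 4 the function returns dest - 2, telling the
 * caller that at least 6 bytes (including the terminator) were needed.
 */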
 * This makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);

	ret = btrfs_previous_extent_item(extent_root, path, 0);

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			    "logical %llu is not within any extent", logical);

	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		    "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		    logical, logical - found_key->objectid, found_key->objectid,
		    found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
	else if (flags & BTRFS_EXTENT_FLAG_DATA)
		*flags_ret = BTRFS_EXTENT_FLAG_DATA;

 * Helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. It is used to track state.
 * If more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. After the last ref was processed, 1 is returned.
 * Returns <0 on error.
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 struct btrfs_extent_inline_ref **out_eiref,
	struct btrfs_tree_block_info *info;

		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				(struct btrfs_extent_inline_ref *)(ei + 1);
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
					(struct btrfs_extent_inline_ref *)(info + 1);
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
		return 1; /* last */

 * Reads the tree block backref for an extent. Tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * Returns 0 if data was provided, 1 if there was no more data to provide or
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)

		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;

	*ptr = (unsigned long)-1;
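
/*
 * Illustrative iteration (a sketch of how scrub-style callers use it):
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *
 *	while (tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *				       &root, &level) == 0) {
 *		(one tree backref resolved to a root id and a tree level)
 *	}
 */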
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
	struct extent_inode_elem *eie;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
		ret = iterate(eie->inum, eie->offset, root, ctx);
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);

 * Calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * When the iterator function returns a non-zero value, iteration stops.
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
		    extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->tree_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);

		btrfs_get_tree_mod_seq(fs_info, &seq_elem);
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						seq_elem.seq, &roots,
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						extent_item_objectid,

	free_leaf_list(refs);

		btrfs_put_tree_mod_seq(fs_info, &seq_elem);
		btrfs_end_transaction(trans);
		up_read(&fs_info->commit_root_sem);

static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
	struct btrfs_data_container *inodes = ctx;
	const size_t c = 3 * sizeof(u64);

	if (inodes->bytes_left >= c) {
		inodes->bytes_left -= c;
		inodes->val[inodes->elem_cnt] = inum;
		inodes->val[inodes->elem_cnt + 1] = offset;
		inodes->val[inodes->elem_cnt + 2] = root;
		inodes->elem_cnt += 3;
		inodes->bytes_missing += c - inodes->bytes_left;
		inodes->bytes_left = 0;
		inodes->elem_missed += 3;
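
/*
 * Each record stored above occupies three consecutive u64 slots in the
 * container's val[] array: (inum, offset, root). A container sized for
 * exactly one record thus ends up with elem_cnt == 3 and accounts any
 * further hits in elem_missed and bytes_missing instead.
 */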
2186 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2187 struct btrfs_path *path,
2188 void *ctx, bool ignore_offset)
2191 u64 extent_item_pos;
2193 struct btrfs_key found_key;
2194 int search_commit_root = path->search_commit_root;
2196 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2197 btrfs_release_path(path);
2200 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2203 extent_item_pos = logical - found_key.objectid;
2204 ret = iterate_extent_inodes(fs_info, found_key.objectid,
2205 extent_item_pos, search_commit_root,
2206 build_ino_list, ctx, ignore_offset);
2211 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2212 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2214 static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2223 struct btrfs_root *fs_root = ipath->fs_root;
2224 struct btrfs_path *path = ipath->btrfs_path;
2225 struct extent_buffer *eb;
2226 struct btrfs_inode_ref *iref;
2227 struct btrfs_key found_key;
2230 ret = btrfs_find_item(fs_root, path, inum,
2231 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2237 ret = found ? 0 : -ENOENT;
2242 parent = found_key.offset;
2243 slot = path->slots[0];
2244 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2249 btrfs_release_path(path);
2251 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2253 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2254 name_len = btrfs_inode_ref_name_len(eb, iref);
2255 /* path must be released before calling iterate()! */
2256 btrfs_debug(fs_root->fs_info,
2257 "following ref at offset %u for inode %llu in tree %llu",
2258 cur, found_key.objectid,
2259 fs_root->root_key.objectid);
2260 ret = inode_to_path(parent, name_len,
2261 (unsigned long)(iref + 1), eb, ipath);
2264 len = sizeof(*iref) + name_len;
2265 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2267 free_extent_buffer(eb);
2270 btrfs_release_path(path);
2275 static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2282 struct btrfs_root *fs_root = ipath->fs_root;
2283 struct btrfs_path *path = ipath->btrfs_path;
2284 struct extent_buffer *eb;
2285 struct btrfs_inode_extref *extref;
2291 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2296 ret = found ? 0 : -ENOENT;
2301 slot = path->slots[0];
2302 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2307 btrfs_release_path(path);
2309 item_size = btrfs_item_size(eb, slot);
2310 ptr = btrfs_item_ptr_offset(eb, slot);
2313 while (cur_offset < item_size) {
2316 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2317 parent = btrfs_inode_extref_parent(eb, extref);
2318 name_len = btrfs_inode_extref_name_len(eb, extref);
2319 ret = inode_to_path(parent, name_len,
2320 (unsigned long)&extref->name, eb, ipath);
2324 cur_offset += btrfs_inode_extref_name_len(eb, extref);
2325 cur_offset += sizeof(*extref);
2327 free_extent_buffer(eb);
2332 btrfs_release_path(path);
2338 * returns 0 if the path could be dumped (probably truncated)
2339 * returns <0 in case of an error
2341 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2342 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2346 int i = ipath->fspath->elem_cnt;
2347 const int s_ptr = sizeof(char *);
2350 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2351 ipath->fspath->bytes_left - s_ptr : 0;
2353 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2354 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2355 name_off, eb, inum, fspath_min, bytes_left);
2357 return PTR_ERR(fspath);
2359 if (fspath > fspath_min) {
2360 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2361 ++ipath->fspath->elem_cnt;
2362 ipath->fspath->bytes_left = fspath - fspath_min;
2364 ++ipath->fspath->elem_missed;
2365 ipath->fspath->bytes_missing += fspath_min - fspath;
2366 ipath->fspath->bytes_left = 0;
/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, ipath);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, ipath);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

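/*
 * Editor's sketch of typical in-kernel usage, loosely modeled on the
 * BTRFS_IOC_INO_PATHS ioctl handler. The function name and the 4 KiB size
 * are assumptions for illustration, not a definitive implementation.
 */
static int __maybe_unused example_print_paths(struct btrfs_root *fs_root,
					      u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* 4 KiB shared by the container header, pointer slots and strings */
	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		goto out_path;
	}

	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto out_ipath;

	for (i = 0; i < ipath->fspath->elem_cnt; i++)
		pr_info("path %u: %s\n", i,
			(char *)(unsigned long)ipath->fspath->val[i]);
	if (ipath->fspath->elem_missed)
		pr_info("%u paths missed, %u more bytes were needed\n",
			ipath->fspath->elem_missed,
			ipath->fspath->bytes_missing);

out_ipath:
	free_ipath(ipath);
out_path:
	btrfs_free_path(path);
	return ret;
}
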
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

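/*
 * Editor's sketch: since the container reports its shortfall, a caller can
 * size a retry instead of guessing. This is an assumed pattern built only
 * from the reporting fields documented above; the extra sizeof(u64) per
 * missed element accounts for the pointer slot each path consumes.
 */
static int __maybe_unused example_paths_with_retry(struct btrfs_root *fs_root,
						   struct btrfs_path *path,
						   u64 inum)
{
	struct inode_fs_paths *ipath;
	u32 size = 4096;
	int ret;

	for (;;) {
		ipath = init_ipath(size, fs_root, path);
		if (IS_ERR(ipath))
			return PTR_ERR(ipath);

		ret = paths_from_inode(inum, ipath);
		if (ret || !ipath->fspath->elem_missed)
			break;

		/* Grow by the reported shortfall and try again */
		size += ipath->fspath->bytes_missing +
			ipath->fspath->elem_missed * sizeof(u64);
		free_ipath(ipath);
	}

	/* ... consume ipath->fspath->val[] here on success ... */
	free_ipath(ipath);
	return ret;
}
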
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/* A key with offset (u64)-1 must not exist, so ret > 0 */
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backref yet.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's an inline ref or not via iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_root *extent_root;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
	ret = btrfs_next_item(extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
							      path->slots[0]);
	return 0;
}

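/*
 * Editor's sketch of the intended iterator pattern, loosely following its
 * use in btrfs_backref_add_tree_node() below: position on the first backref
 * of a tree block, then step through every inline and keyed backref item.
 * Decoding of the individual refs is elided (and a leading
 * btrfs_tree_block_info, if present, would need to be skipped as
 * btrfs_backref_add_tree_node() does).
 */
static int __maybe_unused example_walk_tree_backrefs(struct btrfs_fs_info *fs_info,
						     u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;

	/* 0 = positioned on a backref, >0 = exhausted, <0 = error */
	while (ret == 0) {
		/*
		 * btrfs_backref_iter_is_inline_ref() decides whether to
		 * decode a btrfs_extent_inline_ref at iter->cur_ptr or to
		 * interpret the keyed item behind iter->cur_key.
		 */
		ret = btrfs_backref_iter_next(iter);
	}
	if (ret > 0)
		ret = 0;
	btrfs_backref_iter_release(iter);
out:
	btrfs_backref_iter_free(iter);
	return ret;
}
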
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the node to the leaf node list if no other child block
		 * is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from the current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

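/*
 * Editor's sketch of the cache lifecycle implied by the functions above:
 * init the cache, create nodes/edges through the allocation helpers, and
 * tear everything down with btrfs_backref_release_cache(). The bytenr and
 * the detached handling are made up for illustration.
 */
static int __maybe_unused example_cache_lifecycle(struct btrfs_fs_info *fs_info)
{
	struct btrfs_backref_cache cache;
	struct btrfs_backref_node *node;

	btrfs_backref_init_cache(fs_info, &cache, 1);

	node = btrfs_backref_alloc_node(&cache, SZ_1M /* hypothetical */, 0);
	if (!node)
		return -ENOMEM;

	/* ... normally populated via btrfs_backref_add_tree_node() ... */
	node->detached = 1;
	list_add(&node->list, &cache.detached);

	/* Cleans detached and leaf nodes, then asserts the cache is empty */
	btrfs_backref_release_cache(&cache);
	return 0;
}
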
/*
 * Handle direct tree backref
 *
 * Direct tree backref means, the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}

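/*
 * Editor's sketch: the shape of a "converted" direct backref key as
 * described above. For an inline SHARED_BLOCK_REF the caller synthesizes a
 * key whose offset already is the parent bytenr. Both numbers below are
 * made up.
 */
static inline void example_direct_ref_key(struct btrfs_key *key)
{
	key->objectid = SZ_32M;			/* child block bytenr */
	key->type = BTRFS_SHARED_BLOCK_REF_KEY;
	key->offset = SZ_32M + SZ_16K;		/* parent block bytenr */
}
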
/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means, we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every
 *		time the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root. But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * current running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection is
		 * completely relying on direct backref (key->offset is parent
		 * bytenr), thus only do such check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to the pending list if we need to
			 * check its backrefs, we only do this once while
			 * walking up a tree as we will catch anything else
			 * later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}

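/*
 * Editor's sketch, mirroring the direct case above: an indirect backref
 * records only the owning tree, so key.offset holds a root objectid (e.g.
 * BTRFS_FS_TREE_OBJECTID) rather than a parent bytenr, and the parent must
 * be discovered by the btrfs_search_slot() walk performed above. The bytenr
 * is made up.
 */
static inline void example_indirect_ref_key(struct btrfs_key *key)
{
	key->objectid = SZ_32M;			/* child block bytenr */
	key->type = BTRFS_TREE_BLOCK_REF_KEY;
	key->offset = BTRFS_FS_TREE_OBJECTID;	/* owning root, not a bytenr */
}
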
/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
 *       links aren't yet bi-directional. Use
 *       btrfs_backref_finish_upper_links() to finish the linkage (see the
 *       sketch after that function below).
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;

	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to the pending list if we need to
		 * check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node to cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node.
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

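/*
 * Editor's sketch of how the two stages combine, loosely following
 * build_backref_tree() in relocation.c: expand every pending edge with
 * btrfs_backref_add_tree_node(), then make all links bi-directional with
 * btrfs_backref_finish_upper_links(), falling back to
 * btrfs_backref_error_cleanup() on failure. Simplified, not the definitive
 * caller.
 */
static int __maybe_unused example_build_backref_tree(
		struct btrfs_backref_cache *cache, struct btrfs_path *path,
		struct btrfs_backref_iter *iter, struct btrfs_key *node_key,
		struct btrfs_backref_node *node)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *cur = node;
	int ret;

	/* Stage 1: walk up the tree, queueing unchecked parent blocks */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0)
			goto out;

		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Stage 2: commit the collected nodes/edges into the cache */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto out;
	return 0;
out:
	btrfs_backref_error_cleanup(cache, node);
	return ret;
}
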
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this node's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}