// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "tree-checker.h"

/* Just arbitrary numbers so we can be sure one of these happened. */
#define BACKREF_FOUND_SHARED		6
#define BACKREF_FOUND_NOT_SHARED	7

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	u64 num_bytes;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

	if (!ctx->ignore_extent_item_pos &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need the key.
	 * Thus, we must look into all items and see that we find one (some)
	 * with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count > 0:
 *  - incremented when a ref->count transitions to > 0
 *  - decremented when a ref->count transitions to < 1
 */
struct share_check {
	struct btrfs_backref_share_check_ctx *ctx;
	struct btrfs_root *root;
	u64 inum;
	u64 data_bytenr;
	u64 data_extent_gen;
	/*
	 * Counts number of inodes that refer to an extent (different inodes in
	 * the same root or different roots) that we could find. The sharedness
	 * check typically stops once this counter gets greater than 1, so it
	 * may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent we
	 * are determining the sharedness of. In other words, how many file
	 * extent items we could find for our inode that point to our target
	 * data extent. The value we get here after finishing the extent
	 * sharedness check may be smaller than reality, but if it ends up
	 * being greater than 1, then we know for sure the inode has multiple
	 * file extent items that point to our data extent, and we can safely
	 * assume it's useful to cache the sharedness check result.
	 */
	int self_ref_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

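/*
 * Illustrative sketch, not part of the original file: how the share_count
 * bookkeeping above plays out during a walk. Assuming a fully initialized
 * share_check (root, inum and data_bytenr set) and two prelim_refs coming
 * from two different inodes, each insertion drives a 0 -> 1 count
 * transition in update_share_count() (defined further below):
 *
 *	update_share_count(sc, 0, 1, ref_from_inode_a);	// share_count: 0 -> 1
 *	update_share_count(sc, 0, 1, ref_from_inode_b);	// share_count: 1 -> 2
 *	extent_is_shared(sc);	// now returns BACKREF_FOUND_SHARED
 *
 * A merge that cancels a ref (e.g. a BTRFS_DROP_DELAYED_REF taking a
 * ref->count from 1 to 0) calls update_share_count(sc, 1, 0, ref) and
 * decrements share_count again, which is why all delayed refs must be
 * processed before the result can be trusted.
 */
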
static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

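/*
 * Illustrative note, not part of the original file: prelim_ref_compare()
 * above defines the total order for the preftree rbtrees, and refs merge in
 * prelim_ref_insert() only when it returns 0. For example, two hypothetical
 * refs that differ only in root_id:
 *
 *	struct prelim_ref a = { .level = 0, .root_id = 5 };
 *	struct prelim_ref b = { .level = 0, .root_id = 7 };
 *
 *	prelim_ref_compare(&a, &b);	// returns -1, so no merge
 *
 * Keeping such refs as separate nodes is what lets the sharedness check
 * count references coming from different roots individually.
 */
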
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount, struct prelim_ref *newref)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;

	if (newref->root_id == sc->root->root_key.objectid &&
	    newref->wanted_disk_byte == sc->data_bytenr &&
	    newref->key_for_search.objectid == sc->inum)
		sc->self_ref_count += newref->count;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count, newref);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

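/*
 * Illustrative sketch, not part of the original file: how the helpers above
 * route backrefs into the three preftrees, using made-up values:
 *
 *	struct preftrees pt = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT,
 *	};
 *
 *	// Shared block ref: the parent bytenr is known -> direct tree.
 *	add_direct_ref(fs_info, &pt, level + 1, parent_bytenr,
 *		       bytenr, 1, NULL, GFP_NOFS);
 *
 *	// Tree block ref with a key -> indirect tree.
 *	add_indirect_ref(fs_info, &pt, root_id, &key, level + 1,
 *			 bytenr, 1, NULL, GFP_NOFS);
 *
 *	// Tree block ref without a key -> indirect_missing_keys tree;
 *	// add_missing_keys() later reads the block to fill the key in.
 *	add_indirect_ref(fs_info, &pt, root_id, NULL, level + 1,
 *			 bytenr, 1, NULL, GFP_NOFS);
 */
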
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (ctx->time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (!ctx->skip_inode_ref_list) {
				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && !ctx->skip_inode_ref_list) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, ctx->time_seq);
	}

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	else if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
				struct btrfs_path *path,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when qgroups does backref walks when
	 * adding new delayed refs. To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(ctx->fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (ctx->time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, ctx->time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (ctx->time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);

	btrfs_debug(ctx->fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

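/*
 * Illustrative sketch, not part of the original file: the ulists used by the
 * walk stash an extent_inode_elem list head in each node's aux field, which
 * is why free_leaf_list() above must be used instead of a plain
 * ulist_free(). Consuming such a ulist looks like:
 *
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(ulist, &uiter))) {
 *		struct extent_inode_elem *eie = unode_aux_to_inode_list(node);
 *
 *		// node->val is the leaf bytenr; each eie describes one
 *		// (inum, offset, num_bytes) reference found in that leaf.
 *		for (; eie; eie = eie->next)
 *			;
 *	}
 */
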
/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
				 struct btrfs_path *path,
				 struct preftrees *preftrees,
				 struct share_check *sc)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && ref->root_id != sc->root->root_key.objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
		/*
		 * We can only tolerate ENOENT; otherwise, we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

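/*
 * Illustrative sketch, not part of the original file: the order in which
 * find_parent_nodes() (further below) drives the pieces defined around here,
 * with locking and error handling omitted:
 *
 *	// 1) Collect refs into the three preftrees.
 *	add_delayed_refs(...);	// from the in-memory delayed ref head
 *	add_inline_refs(...);	// from the extent item itself
 *	add_keyed_refs(...);	// from separate backref items
 *
 *	// 2) Read tree blocks to fill in keys for refs that lack one.
 *	add_missing_keys(fs_info, &preftrees, lock);
 *
 *	// 3) Resolve (root, key, level) refs to parent bytenrs.
 *	resolve_indirect_refs(ctx, path, &preftrees, sc);
 *
 * After step 3 both indirect trees are empty and preftrees.direct holds one
 * merged prelim_ref per parent block.
 */
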
/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		struct btrfs_tree_parent_check check = { 0 };

		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		check.level = ref->level - 1;
		check.owner_root = ref->root_id;

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_path *path,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

	if (ctx->check_extent_item) {
		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
		if (ret)
			return ret;
	}

	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(ctx->fs_info, preftrees,
					     *info_level + 1, offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
					     ctx->bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(ctx->fs_info, preftrees,
						       root, &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
			  struct btrfs_root *extent_root,
			  struct btrfs_path *path,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != ctx->bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, ctx->bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, ctx->bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(fs_info, preftrees, root,
						       &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &ctx->path_cache_entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not shared
	 * because we have just switched from one leaf to another, and switched
	 * also other nodes above the leaf and below the current level, so mark
	 * them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			ctx->path_cache_entries[i].is_shared = true;
			ctx->path_cache_entries[i].gen = entry->gen;
		}
	}

	return true;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &ctx->path_cache_entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct, the refcount of
	 * data extent is increased in the extent item at the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &ctx->path_cache_entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}

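/*
 * Illustrative sketch, not part of the original file: typical use of the
 * path cache pair above when deciding sharedness for one extent buffer,
 * assuming bytenr, level and sc_ctx come from the surrounding walk:
 *
 *	bool is_shared;
 *
 *	if (lookup_backref_shared_cache(sc_ctx, root, bytenr, level,
 *					&is_shared))
 *		return is_shared;	// cache hit, result still valid
 *
 *	is_shared = ...;		// expensive backref walk for the block
 *	store_backref_shared_cache(sc_ctx, root, bytenr, level, is_shared);
 *
 * The generation stamp is what invalidates an entry: a "not shared" result
 * expires once the root gains a new snapshot, and a "shared" result expires
 * once the last root drop generation moves on.
 */
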
8da6d581
JS
1373/*
1374 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1375 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1376 * indirect refs to their parent bytenr.
1377 * When roots are found, they're added to the roots list
1378 *
a2c8d27e
FM
1379 * @ctx: Backref walking context object, must be not NULL.
1380 * @sc: If !NULL, then immediately return BACKREF_FOUND_SHARED when a
1381 * shared extent is detected.
3ec4d323
EN
1382 *
1383 * Otherwise this returns 0 for success and <0 for an error.
1384 *
8da6d581
JS
1385 * FIXME some caching might speed things up
1386 */
a2c8d27e 1387static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
6ce6ba53 1388 struct share_check *sc)
8da6d581 1389{
a2c8d27e 1390 struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
8da6d581
JS
1391 struct btrfs_key key;
1392 struct btrfs_path *path;
8da6d581 1393 struct btrfs_delayed_ref_root *delayed_refs = NULL;
d3b01064 1394 struct btrfs_delayed_ref_head *head;
8da6d581
JS
1395 int info_level = 0;
1396 int ret;
e0c476b1 1397 struct prelim_ref *ref;
86d5f994 1398 struct rb_node *node;
f05c4746 1399 struct extent_inode_elem *eie = NULL;
86d5f994
EN
1400 struct preftrees preftrees = {
1401 .direct = PREFTREE_INIT,
1402 .indirect = PREFTREE_INIT,
1403 .indirect_missing_keys = PREFTREE_INIT
1404 };
8da6d581 1405
56f5c199
FM
1406 /* Roots ulist is not needed when using a sharedness check context. */
1407 if (sc)
a2c8d27e 1408 ASSERT(ctx->roots == NULL);
56f5c199 1409
a2c8d27e 1410 key.objectid = ctx->bytenr;
8da6d581 1411 key.offset = (u64)-1;
a2c8d27e 1412 if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
261c84b6
JB
1413 key.type = BTRFS_METADATA_ITEM_KEY;
1414 else
1415 key.type = BTRFS_EXTENT_ITEM_KEY;
8da6d581
JS
1416
1417 path = btrfs_alloc_path();
1418 if (!path)
1419 return -ENOMEM;
a2c8d27e 1420 if (!ctx->trans) {
da61d31a 1421 path->search_commit_root = 1;
e84752d4
WS
1422 path->skip_locking = 1;
1423 }
8da6d581 1424
a2c8d27e 1425 if (ctx->time_seq == BTRFS_SEQ_LAST)
21633fc6
QW
1426 path->skip_locking = 1;
1427
8da6d581 1428again:
d3b01064
LZ
1429 head = NULL;
1430
98cc4222 1431 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8da6d581
JS
1432 if (ret < 0)
1433 goto out;
fcba0120
JB
1434 if (ret == 0) {
1435 /* This shouldn't happen, indicates a bug or fs corruption. */
1436 ASSERT(ret != 0);
1437 ret = -EUCLEAN;
1438 goto out;
1439 }
8da6d581 1440
a2c8d27e
FM
1441 if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
1442 ctx->time_seq != BTRFS_SEQ_LAST) {
7a3ae2f8 1443 /*
9665ebd5
JB
1444 * We have a specific time_seq we care about and trans which
1445 * means we have the path lock, we need to grab the ref head and
1446 * lock it so we have a consistent view of the refs at the given
1447 * time.
7a3ae2f8 1448 */
a2c8d27e 1449 delayed_refs = &ctx->trans->transaction->delayed_refs;
7a3ae2f8 1450 spin_lock(&delayed_refs->lock);
a2c8d27e 1451 head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
7a3ae2f8
JS
1452 if (head) {
1453 if (!mutex_trylock(&head->mutex)) {
d278850e 1454 refcount_inc(&head->refs);
7a3ae2f8
JS
1455 spin_unlock(&delayed_refs->lock);
1456
1457 btrfs_release_path(path);
1458
1459 /*
1460 * Mutex was contended, block until it's
1461 * released and try again
1462 */
1463 mutex_lock(&head->mutex);
1464 mutex_unlock(&head->mutex);
d278850e 1465 btrfs_put_delayed_ref_head(head);
7a3ae2f8
JS
1466 goto again;
1467 }
d7df2c79 1468 spin_unlock(&delayed_refs->lock);
a2c8d27e 1469 ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
b25b0b87 1470 &preftrees, sc);
155725c9 1471 mutex_unlock(&head->mutex);
d7df2c79 1472 if (ret)
7a3ae2f8 1473 goto out;
d7df2c79
JB
1474 } else {
1475 spin_unlock(&delayed_refs->lock);
d3b01064 1476 }
8da6d581 1477 }
8da6d581
JS
1478
1479 if (path->slots[0]) {
1480 struct extent_buffer *leaf;
1481 int slot;
1482
dadcaf78 1483 path->slots[0]--;
8da6d581 1484 leaf = path->nodes[0];
dadcaf78 1485 slot = path->slots[0];
8da6d581 1486 btrfs_item_key_to_cpu(leaf, &key, slot);
a2c8d27e 1487 if (key.objectid == ctx->bytenr &&
261c84b6
JB
1488 (key.type == BTRFS_EXTENT_ITEM_KEY ||
1489 key.type == BTRFS_METADATA_ITEM_KEY)) {
f73853c7
FM
1490 ret = add_inline_refs(ctx, path, &info_level,
1491 &preftrees, sc);
8da6d581
JS
1492 if (ret)
1493 goto out;
adf02418 1494 ret = add_keyed_refs(ctx, root, path, info_level,
3ec4d323 1495 &preftrees, sc);
8da6d581
JS
1496 if (ret)
1497 goto out;
1498 }
1499 }
8da6d581 1500
56f5c199
FM
1501 /*
1502 * If we have a share context and we reached here, it means the extent
1503 * is not directly shared (no multiple reference items for it),
1504 * otherwise we would have exited earlier with a return value of
1505 * BACKREF_FOUND_SHARED after processing delayed references or while
1506 * processing inline or keyed references from the extent tree.
1507 * The extent may however be indirectly shared through shared subtrees
1508 * as a result from creating snapshots, so we determine below what is
1509 * its parent node, in case we are dealing with a metadata extent, or
1510 * what's the leaf (or leaves), from a fs tree, that has a file extent
1511 * item pointing to it in case we are dealing with a data extent.
1512 */
1513 ASSERT(extent_is_shared(sc) == 0);
1514
877c1476
FM
1515 /*
1516 * If we are here for a data extent and we have a share_check structure
1517 * it means the data extent is not directly shared (does not have
1518 * multiple reference items), so we have to check if a path in the fs
1519 * tree (going from the root node down to the leaf that has the file
1520 * extent item pointing to the data extent) is shared, that is, if any
1521 * of the extent buffers in the path is referenced by other trees.
1522 */
a2c8d27e 1523 if (sc && ctx->bytenr == sc->data_bytenr) {
6976201f
FM
1524 /*
1525 * If our data extent is from a generation more recent than the
1526 * last generation used to snapshot the root, then we know that
1527 * it can not be shared through subtrees, so we can skip
1528 * resolving indirect references, there's no point in
1529 * determining the extent buffers for the path from the fs tree
1530 * root node down to the leaf that has the file extent item that
1531 * points to the data extent.
1532 */
1533 if (sc->data_extent_gen >
1534 btrfs_root_last_snapshot(&sc->root->root_item)) {
1535 ret = BACKREF_FOUND_NOT_SHARED;
1536 goto out;
1537 }
1538
877c1476
FM
1539 /*
1540 * If we are only determining if a data extent is shared or not
1541 * and the corresponding file extent item is located in the same
1542 * leaf as the previous file extent item, we can skip resolving
1543 * indirect references for a data extent, since the fs tree path
1544 * is the same (same leaf, so same path). We skip as long as the
1545 * cached result for the leaf is valid and only if there's only
1546 * one file extent item pointing to the data extent, because in
		 * the case of multiple file extent items, they may be located
		 * in different leaves and therefore we have multiple paths.
		 */
		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
		    sc->self_ref_count == 1) {
			bool cached;
			bool is_shared;

			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
						sc->ctx->curr_leaf_bytenr,
						0, &is_shared);
			if (cached) {
				if (is_shared)
					ret = BACKREF_FOUND_SHARED;
				else
					ret = BACKREF_FOUND_NOT_SHARED;
				goto out;
			}
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
			    ref->level == 0) {
				struct btrfs_tree_parent_check check = { 0 };
				struct extent_buffer *eb;

				check.level = ref->level;

				eb = read_tree_block(ctx->fs_info, ref->parent,
						     &check);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(ctx, eb, &eie);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && !ctx->skip_inode_ref_list) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
 * enough.
 *
 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
 */
int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
{
	int ret;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	ret = find_parent_nodes(ctx, NULL);
	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
	    (ret < 0 && ret != -ENOENT)) {
		free_leaf_list(ctx->refs);
		ctx->refs = NULL;
		return ret;
	}

	return 0;
}
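
/*
 * Example usage (illustrative sketch; 'fs_info' and 'bytenr' are assumed to
 * come from the caller's context). Since ignore_extent_item_pos is set, a
 * plain ulist_free() is enough to release the result:
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *	int ret;
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = bytenr;
 *	walk_ctx.ignore_extent_item_pos = true;
 *	ret = btrfs_find_all_leafs(&walk_ctx);
 *	if (ret == 0) {
 *		(each node in walk_ctx.refs holds the bytenr of one leaf)
 *		ulist_free(walk_ctx.refs);
 *	}
 */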

/*
 * Walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list.
 *
 * Found roots are added to @ctx->roots, which is allocated by this function if
 * it points to NULL, in which case the caller is responsible for freeing it
 * after it's not needed anymore.
 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
 * ulist to do temporary work, and frees it before returning.
 *
 * Returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
{
	const u64 orig_bytenr = ctx->bytenr;
	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
	bool roots_ulist_allocated = false;
	struct ulist_iterator uiter;
	int ret = 0;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	if (!ctx->roots) {
		ctx->roots = ulist_alloc(GFP_NOFS);
		if (!ctx->roots) {
			ulist_free(ctx->refs);
			ctx->refs = NULL;
			return -ENOMEM;
		}
		roots_ulist_allocated = true;
	}

	ctx->skip_inode_ref_list = true;

	ULIST_ITER_INIT(&uiter);
	while (1) {
		struct ulist_node *node;

		ret = find_parent_nodes(ctx, NULL);
		if (ret < 0 && ret != -ENOENT) {
			if (roots_ulist_allocated) {
				ulist_free(ctx->roots);
				ctx->roots = NULL;
			}
			break;
		}
		ret = 0;
		node = ulist_next(ctx->refs, &uiter);
		if (!node)
			break;
		ctx->bytenr = node->val;
		cond_resched();
	}

	ulist_free(ctx->refs);
	ctx->refs = NULL;
	ctx->bytenr = orig_bytenr;
	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;

	return ret;
}

int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
			 bool skip_commit_root_sem)
{
	int ret;

	if (!ctx->trans && !skip_commit_root_sem)
		down_read(&ctx->fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(ctx);
	if (!ctx->trans && !skip_commit_root_sem)
		up_read(&ctx->fs_info->commit_root_sem);
	return ret;
}
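
/*
 * Example of consuming the result (illustrative sketch, assuming a freshly
 * initialized walk_ctx as in the earlier sketch):
 *
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ret = btrfs_find_all_roots(&walk_ctx, false);
 *	if (ret == 0) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((node = ulist_next(walk_ctx.roots, &uiter)))
 *			(node->val is the objectid of one referencing root)
 *		ulist_free(walk_ctx.roots);
 *	}
 */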

struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
{
	struct btrfs_backref_share_check_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ulist_init(&ctx->refs);

	return ctx;
}

void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
{
	if (!ctx)
		return;

	ulist_release(&ctx->refs);
	kfree(ctx);
}

/*
 * Check if a data extent is shared or not.
 *
 * @inode:       The inode whose extent we are checking.
 * @bytenr:      Logical bytenr of the extent we are checking.
 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
 *               not known.
 * @ctx:         A backref sharedness check context.
 *
 * btrfs_is_data_extent_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
				u64 extent_gen,
				struct btrfs_backref_share_check_ctx *ctx)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.ctx = ctx,
		.root = root,
		.inum = btrfs_ino(inode),
		.data_bytenr = bytenr,
		.data_extent_gen = extent_gen,
		.share_count = 0,
		.self_ref_count = 0,
		.have_delayed_delete_refs = false,
	};
	int level;
	bool leaf_cached;
	bool leaf_is_shared;

	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
		if (ctx->prev_extents_cache[i].bytenr == bytenr)
			return ctx->prev_extents_cache[i].is_shared;
	}

	ulist_init(&ctx->refs);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
		walk_ctx.time_seq = elem.seq;
	}

	ctx->use_path_cache = true;

	/*
	 * We may have previously determined that the current leaf is shared.
	 * If it is, then we have a data extent that is shared due to a shared
	 * subtree (caused by snapshotting) and we don't need to check for data
	 * backrefs. If the leaf is not shared, then we must do backref walking
	 * to determine if the data extent is shared through reflinks.
	 */
	leaf_cached = lookup_backref_shared_cache(ctx, root,
						  ctx->curr_leaf_bytenr, 0,
						  &leaf_is_shared);
	if (leaf_cached && leaf_is_shared) {
		ret = 1;
		goto out_trans;
	}

	walk_ctx.skip_inode_ref_list = true;
	walk_ctx.trans = trans;
	walk_ctx.fs_info = fs_info;
	walk_ctx.refs = &ctx->refs;

	/* -1 means we are in the bytenr of the data extent. */
	level = -1;
	ULIST_ITER_INIT(&uiter);
	while (1) {
		const unsigned long prev_ref_count = ctx->refs.nnodes;

		walk_ctx.bytenr = bytenr;
		ret = find_parent_nodes(&walk_ctx, &shared);
		if (ret == BACKREF_FOUND_SHARED ||
		    ret == BACKREF_FOUND_NOT_SHARED) {
			/* If shared must return 1, otherwise return 0. */
			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
			if (level >= 0)
				store_backref_shared_cache(ctx, root, bytenr,
							   level, ret == 1);
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;

		/*
		 * More than one extent buffer (bytenr) may have been added to
		 * the ctx->refs ulist, in which case we have to check multiple
		 * tree paths in case the first one is not shared, so we can not
		 * use the path cache which is made for a single path. Multiple
		 * extent buffers at the current level happen when:
		 *
		 * 1) level -1, the data extent: If our data extent was not
		 *    directly shared (without multiple reference items), then
		 *    it might have a single reference item with a count > 1 for
		 *    the same offset, which means there are 2 (or more) file
		 *    extent items that point to the data extent - this happens
		 *    when a file extent item needs to be split and then one
		 *    item gets moved to another leaf due to a b+tree leaf split
		 *    when inserting some item. In this case the file extent
		 *    items may be located in different leaves and therefore
		 *    some of the leaves may be referenced through shared
		 *    subtrees while others are not. Since our extent buffer
		 *    cache only works for a single path (by far the most common
		 *    case and simpler to deal with), we can not use it if we
		 *    have multiple leaves (which implies multiple paths).
		 *
		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
		 *    and indirect references on a b+tree node/leaf, so we have
		 *    to check multiple paths, and the extent buffer (the
		 *    current bytenr) may be shared or not. One example is
		 *    during relocation as we may get a shared tree block ref
		 *    (direct ref) and a non-shared tree block ref (indirect
		 *    ref) for the same node/leaf.
		 */
		if ((ctx->refs.nnodes - prev_ref_count) > 1)
			ctx->use_path_cache = false;

		if (level >= 0)
			store_backref_shared_cache(ctx, root, bytenr,
						   level, false);
		node = ulist_next(&ctx->refs, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		if (ctx->use_path_cache) {
			bool is_shared;
			bool cached;

			level++;
			cached = lookup_backref_shared_cache(ctx, root, bytenr,
							     level, &is_shared);
			if (cached) {
				ret = (is_shared ? 1 : 0);
				break;
			}
		}
		shared.share_count = 0;
		shared.have_delayed_delete_refs = false;
		cond_resched();
	}

	/*
	 * If the path cache is disabled, then it means at some tree level we
	 * got multiple parents due to a mix of direct and indirect backrefs or
	 * multiple leaves with file extent items pointing to the same data
	 * extent. We have to invalidate the cache and cache only the sharedness
	 * result for the levels where we got only one node/reference.
	 */
	if (!ctx->use_path_cache) {
		int i = 0;

		level--;
		if (ret >= 0 && level >= 0) {
			bytenr = ctx->path_cache_entries[level].bytenr;
			ctx->use_path_cache = true;
			store_backref_shared_cache(ctx, root, bytenr, level, ret);
			i = level + 1;
		}

		for ( ; i < BTRFS_MAX_LEVEL; i++)
			ctx->path_cache_entries[i].bytenr = 0;
	}

	/*
	 * Cache the sharedness result for the data extent if we know our inode
	 * has more than 1 file extent item that refers to the data extent.
	 */
	if (ret >= 0 && shared.self_ref_count > 1) {
		int slot = ctx->prev_extents_cache_slot;

		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
		ctx->prev_extents_cache[slot].is_shared = (ret == 1);

		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
		ctx->prev_extents_cache_slot = slot;
	}

out_trans:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(&ctx->refs);
	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;

	return ret;
}
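
/*
 * Example call site (illustrative sketch, modeled on how fiemap-style
 * callers use the return value; 'binode' and 'disk_bytenr' are assumed to
 * come from the surrounding context):
 *
 *	ret = btrfs_is_data_extent_shared(binode, disk_bytenr, 0, ctx);
 *	if (ret < 0)
 *		goto out;
 *	if (ret)
 *		flags |= FIEMAP_EXTENT_SHARED;
 */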

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next BTRFS_INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * This iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. The path is only given within the current file system.
 * Therefore, it never starts with a '/'. The caller is responsible for
 * providing "size" bytes in "dest". The dest buffer will be filled backwards.
 * Finally, the start point of the resulting string is returned. This pointer
 * is within dest, normally.
 * In case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. That way, the caller can determine how much space would be
 * required for the path to fit into the buffer. In that case, the returned
 * value will be smaller than dest. Callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				      BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
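
/*
 * Sketch of checking the result for truncation (hypothetical caller; since
 * the buffer is filled backwards, a returned pointer below 'dest' means
 * 'size' bytes were not enough):
 *
 *	char *name = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				       eb, parent, dest, size);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	if (name < dest)
 *		return -ENAMETOOLONG;
 *	('name' now points to the 0-terminated path inside 'dest')
 */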

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		logical, logical - found_key->objectid, found_key->objectid,
		found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}
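
/*
 * Typical usage (sketch; this mirrors iterate_inodes_from_logical() below):
 *
 *	u64 flags = 0;
 *	struct btrfs_key found_key;
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
 *	btrfs_release_path(path);
 *	if (ret < 0)
 *		return ret;
 *	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 *		(metadata extent: resolve with tree_backref_for_extent())
 *	else
 *		(data extent: resolve inodes via iterate_extent_inodes())
 */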

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
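
/*
 * Iteration pattern (sketch; callers loop until the function returns
 * non-zero, with 'ptr' carrying the state between calls):
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *
 *	do {
 *		ret = tree_backref_for_extent(&ptr, eb, &key, ei,
 *					      item_size, &root, &level);
 *		if (ret < 0)
 *			break;
 *		if (ret == 0)
 *			(one tree backref resolved to (root, level))
 *	} while (ret == 0);
 */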

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
			  bool search_commit_root,
			  iterate_extent_inodes_t *iterate, void *user_ctx)
{
	int ret;
	struct ulist *refs;
	struct ulist_node *ref_node;
	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
	struct ulist_iterator ref_uiter;

	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
		    ctx->bytenr);

	ASSERT(ctx->trans == NULL);
	ASSERT(ctx->roots == NULL);

	if (!search_commit_root) {
		struct btrfs_trans_handle *trans;

		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
		ctx->trans = trans;
	}

	if (ctx->trans) {
		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
		ctx->time_seq = seq_elem.seq;
	} else {
		down_read(&ctx->fs_info->commit_root_sem);
	}

	ret = btrfs_find_all_leafs(ctx);
	if (ret)
		goto out;
	refs = ctx->refs;
	ctx->refs = NULL;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		const u64 leaf_bytenr = ref_node->val;
		struct ulist_node *root_node;
		struct ulist_iterator root_uiter;
		struct extent_inode_elem *inode_list;

		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;

		if (ctx->cache_lookup) {
			const u64 *root_ids;
			int root_count;
			bool cached;

			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
						   &root_ids, &root_count);
			if (cached) {
				for (int i = 0; i < root_count; i++) {
					ret = iterate_leaf_refs(ctx->fs_info,
								inode_list,
								root_ids[i],
								leaf_bytenr,
								iterate,
								user_ctx);
					if (ret)
						break;
				}
				continue;
			}
		}

		if (!ctx->roots) {
			ctx->roots = ulist_alloc(GFP_NOFS);
			if (!ctx->roots) {
				ret = -ENOMEM;
				break;
			}
		}

		ctx->bytenr = leaf_bytenr;
		ret = btrfs_find_all_roots_safe(ctx);
		if (ret)
			break;

		if (ctx->cache_store)
			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);

		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
			btrfs_debug(ctx->fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
						root_node->val, ctx->bytenr,
						iterate, user_ctx);
		}
		ulist_reinit(ctx->roots);
	}

	free_leaf_list(refs);
out:
	if (ctx->trans) {
		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
		btrfs_end_transaction(ctx->trans);
		ctx->trans = NULL;
	} else {
		up_read(&ctx->fs_info->commit_root_sem);
	}

	ulist_free(ctx->roots);
	ctx->roots = NULL;

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
		ret = 0;

	return ret;
}
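
/*
 * A minimal iterator callback has the iterate_extent_inodes_t signature and
 * returns non-zero to stop the walk, as in this sketch (build_ino_list()
 * below is the real callback used by the logical-to-ino ioctl):
 *
 *	static int count_refs(u64 inum, u64 offset, u64 num_bytes, u64 root,
 *			      void *ctx)
 *	{
 *		u64 *count = ctx;
 *
 *		(*count)++;
 *		return 0;
 *	}
 */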

static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
{
	struct btrfs_data_container *inodes = ctx;
	const size_t c = 3 * sizeof(u64);

	if (inodes->bytes_left >= c) {
		inodes->bytes_left -= c;
		inodes->val[inodes->elem_cnt] = inum;
		inodes->val[inodes->elem_cnt + 1] = offset;
		inodes->val[inodes->elem_cnt + 2] = root;
		inodes->elem_cnt += 3;
	} else {
		inodes->bytes_missing += c - inodes->bytes_left;
		inodes->bytes_left = 0;
		inodes->elem_missed += 3;
	}

	return 0;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				void *ctx, bool ignore_offset)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	int ret;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	walk_ctx.bytenr = found_key.objectid;
	if (ignore_offset)
		walk_ctx.ignore_extent_item_pos = true;
	else
		walk_ctx.extent_item_pos = logical - found_key.objectid;
	walk_ctx.fs_info = fs_info;

	return iterate_extent_inodes(&walk_ctx, search_commit_root,
				     build_ino_list, ctx);
}

static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, struct inode_fs_paths *ipath);

static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct btrfs_root *fs_root = ipath->fs_root;
	struct btrfs_path *path = ipath->btrfs_path;
	struct extent_buffer *eb;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				      parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				      &found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				    "following ref at offset %u for inode %llu in tree %llu",
				    cur, found_key.objectid,
				    fs_root->root_key.objectid);
			ret = inode_to_path(parent, name_len,
					    (unsigned long)(iref + 1), eb, ipath);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct btrfs_root *fs_root = ipath->fs_root;
	struct btrfs_path *path = ipath->btrfs_path;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = inode_to_path(parent, name_len,
					    (unsigned long)&extref->name, eb, ipath);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
{
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, ipath);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, ipath);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}
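
/*
 * Sketch of a full lookup (hypothetical caller, modeled on the ino-to-path
 * ioctl): allocate an ipath, dump the paths, then read them back:
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *	ret = paths_from_inode(inum, ipath);
 *	if (!ret) {
 *		for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *			(ipath->fspath->val[i] holds the address of one
 *			 0-terminated path)
 *	}
 *	free_ipath(ipath);
 */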

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}

struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), GFP_NOFS);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only iteration over tree backrefs is supported for now.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
				path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's inline ref or not by iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_root *extent_root;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
	ret = btrfs_next_item(extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
							      path->slots[0]);
	return 0;
}
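
/*
 * Typical iteration over all tree backrefs of a block (illustrative sketch;
 * the iterator only walks the commit root):
 *
 *	iter = btrfs_backref_iter_alloc(fs_info);
 *	if (!iter)
 *		return -ENOMEM;
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		(inspect iter->cur_key and iter->cur_ptr here)
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 *	(ret > 0 means all backrefs were visited, ret < 0 is an error)
 */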

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the node to leaf node list if no other child block
		 * cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
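
/*
 * Cache lifecycle sketch (illustrative; relocation is the main user of this
 * cache):
 *
 *	struct btrfs_backref_cache cache;
 *
 *	btrfs_backref_init_cache(fs_info, &cache, 1);
 *	(build nodes and edges with btrfs_backref_add_tree_node() and
 *	 btrfs_backref_finish_upper_links())
 *	btrfs_backref_release_cache(&cache);
 */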

/*
 * Handle direct tree backref
 *
 * Direct tree backref means, the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}

/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means, we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every time
 *		the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root. But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * current running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection is
		 * completely relying on direct backref (key->offset is parent
		 * bytenr), thus only do such check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to pending list if we need to check its
			 * backrefs, we only do this once while walking up a
			 * tree as we will catch anything else later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}
3358
3359/*
3360 * Add backref node @cur into @cache.
3361 *
3362 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3363 * links aren't yet bi-directional. Needs to finish such links.
fc997ed0 3364 * Use btrfs_backref_finish_upper_links() to finish such linkage.
1b60d2ec
QW
3365 *
3366 * @path: Released path for indirect tree backref lookup
3367 * @iter: Released backref iter for extent tree search
3368 * @node_key: The first key of the tree block
3369 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block.
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing a backref
		 * of type BTRFS_TREE_BLOCK_REF_KEY.
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to the pending list if we need to
		 * check its backrefs.
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

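	/*
	 * Iterate over every backref item of this tree block, both inline
	 * refs and keyed ref items, adding one edge per parent reference.
	 */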
	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches the current inline ref, no
		 * need to rebuild this node for this inline ref.
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
		} else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
			/*
			 * For BTRFS_TREE_BLOCK_REF_KEY the inline ref offset
			 * means the root objectid. We need to search the tree
			 * to get its parent bytenr.
			 */
			ret = handle_indirect_tree_backref(cache, path, &key,
							   node_key, cur);
			if (ret < 0)
				goto out;
		}
		/*
		 * Unrecognized tree backref items (if they can pass the
		 * tree-checker) are ignored.
		 */
	}
	ret = 0;
	cur->checked = 1;
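	/*
	 * A leftover @exist means the pre-existing edge matched none of the
	 * backref items we just walked, which should never happen.
	 */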
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

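/*
 * Sketch (illustrative, not part of this file): a caller such as
 * relocation's build_backref_tree() pairs the two steps roughly like this:
 *
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, &node_key, cur);
 *	if (ret < 0)
 *		goto error;
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		goto error;
 */
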
/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node().
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node into the cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth-first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node.
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue it for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in the current build_backref_tree()
		 * haven't been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
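			/*
			 * A 'lowest' node has its ->lower head linked on
			 * cache->leaves (see the insertion of @start above),
			 * so unhook it before ->lower starts holding edges.
			 */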
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage.
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

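/*
 * Sketch (illustrative, not part of this file): builders of the backref
 * cache call this on failure to tear down a partially built tree, e.g.:
 *
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, &key, cur);
 *	if (ret < 0) {
 *		btrfs_backref_error_cleanup(cache, node);
 *		return ERR_PTR(ret);
 *	}
 */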
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache; we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this node's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}