// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "tree-checker.h"

/* Just arbitrary numbers so we can be sure one of these happened. */
#define BACKREF_FOUND_SHARED     6
#define BACKREF_FOUND_NOT_SHARED 7

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	u64 num_bytes;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

	if (!ctx->ignore_extent_item_pos &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}

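/*
 * A short sketch of the return convention used by check_extent_in_eb()
 * (a hedged reading, based on how the callers below consume it): 0 means
 * an inode element was added (or the indirect_ref_iterator handled the
 * cached roots), 1 means the file extent item does not cover
 * ctx->extent_item_pos and should be skipped, a negative value is an
 * error, and BTRFS_ITERATE_EXTENT_INODES_STOP is propagated from the
 * indirect_ref_iterator callback to stop iteration:
 *
 *	ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
 *	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
 *		return ret;
 */
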
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need the
	 * key. Thus, we must look into all items and see whether we find one
	 * (or several) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

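/*
 * A minimal usage sketch, mirroring the initialization done later in
 * find_parent_nodes(): all three trees start out empty via PREFTREE_INIT,
 * and presumably each is torn down with prelim_release() once a walk
 * finishes:
 *
 *	struct preftrees preftrees = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT
 *	};
 */
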
/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count > 0:
 *  - incremented when a ref->count transitions to > 0
 *  - decremented when a ref->count transitions to < 1
 */
struct share_check {
	struct btrfs_backref_share_check_ctx *ctx;
	struct btrfs_root *root;
	u64 inum;
	u64 data_bytenr;
	u64 data_extent_gen;
	/*
	 * Counts number of inodes that refer to an extent (different inodes in
	 * the same root or different roots) that we could find. The sharedness
	 * check typically stops once this counter gets greater than 1, so it
	 * may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent
	 * whose sharedness we are determining. In other words, how many file
	 * extent items we could find for our inode that point to our target
	 * data extent. The value we get here after finishing the extent
	 * sharedness check may be smaller than reality, but if it ends up
	 * being greater than 1, then we know for sure the inode has multiple
	 * file extent items that point to our data extent, and we can safely
	 * assume it's useful to cache the sharedness check result.
	 */
	int self_ref_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

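/*
 * Hedged usage sketch: add_prelim_ref() below returns extent_is_shared(sc)
 * after every insertion, so callers doing a sharedness check can bail out
 * as soon as two counted references exist:
 *
 *	ret = add_direct_ref(fs_info, preftrees, level, parent,
 *			     wanted_disk_byte, count, sc, GFP_NOFS);
 *	if (ret == BACKREF_FOUND_SHARED)
 *		goto out;	(at least two references, stop early)
 */
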
static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref), 0, 0, NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(const struct prelim_ref *ref1,
			      const struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static int prelim_ref_rb_add_cmp(const struct rb_node *new,
				 const struct rb_node *exist)
{
	const struct prelim_ref *ref_new =
		rb_entry(new, struct prelim_ref, rbnode);
	const struct prelim_ref *ref_exist =
		rb_entry(exist, struct prelim_ref, rbnode);

	/*
	 * prelim_ref_compare() expects the first parameter as the existing one,
	 * different from the rb_find_add_cached() order.
	 */
	return prelim_ref_compare(ref_exist, ref_new);
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount, const struct prelim_ref *newref)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;

	if (newref->root_id == btrfs_root_id(sc->root) &&
	    newref->wanted_disk_byte == sc->data_bytenr &&
	    newref->key_for_search.objectid == sc->inum)
		sc->self_ref_count += newref->count;
}

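/*
 * A worked example of the transition rules above (a sketch, not extra
 * logic): merging a delayed DROP ref with newref->count == -1 into an
 * existing ref with count == 1 calls update_share_count(sc, 1, 0, newref),
 * decrementing sc->share_count because the count transitioned from > 0 to
 * < 1. A later ADD with count == +1 transitions it back (0 -> 1) and
 * increments sc->share_count again, so cancelling ADD/DROP pairs leave
 * the sharedness verdict unchanged.
 */
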
/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node *exist;

	root = &preftree->root;
	exist = rb_find_add_cached(&newref->rbnode, root, prelim_ref_rb_add_cmp);
	if (exist) {
		struct prelim_ref *ref = rb_entry(exist, struct prelim_ref, rbnode);
		/* Identical refs, merge them and free @newref */
		struct extent_inode_elem *eie = ref->inode_list;

		while (eie && eie->next)
			eie = eie->next;

		if (!eie)
			ref->inode_list = newref->inode_list;
		else
			eie->next = newref->inode_list;
		trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
					     preftree->count);
		/*
		 * A delayed ref can have newref->count < 0.
		 * The ref->count is updated to follow any
		 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
		 */
		update_share_count(sc, ref->count,
				   ref->count + newref->count, newref);
		ref->count += newref->count;
		free_pref(newref);
		return;
	}

	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

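/*
 * A note on why the lookup above works (a hedged reading of the compare
 * order): prelim_ref_compare() orders refs by level, root_id and
 * key_for_search before parent, and direct refs are inserted with
 * root == 0 and key == NULL (see add_direct_ref() above), so a
 * zero-initialized target with only .parent set compares equal exactly
 * when the direct tree holds a shared data backref (level 0) for @bytenr.
 */
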
static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for a normal backref but the bytenr of this leaf
	 *    matches a shared data backref.
	 * 3. The leaf owner is not equal to the root we are searching for.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of this
		 * leaf matches a shared data backref, OR the leaf owner is not
		 * equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (ctx->time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (!ctx->skip_inode_ref_list) {
				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && !ctx->skip_inode_ref_list) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, ctx->time_seq);
	}

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	else if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
				struct btrfs_path *path,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when qgroups do backref walks while
	 * adding new delayed refs. To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(ctx->fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (ctx->time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, ctx->time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (ctx->time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);

	btrfs_debug(ctx->fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

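/*
 * Hedged sketch of the aux convention used above: each ulist_node's aux
 * field carries a struct extent_inode_elem list head, stored by
 * add_all_parents() via
 *
 *	ulist_add_merge_ptr(parents, eb->start, eie, (void **)&old, GFP_NOFS);
 *
 * and read back with unode_aux_to_inode_list(). The cast through uintptr_t
 * merely round-trips a pointer, with no reference counting, which is why
 * free_leaf_list() must walk each list and kfree() the elements before
 * freeing the ulist itself.
 */
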
/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
				 struct btrfs_path *path,
				 struct preftrees *preftrees,
				 struct share_check *sc)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && ref->root_id != btrfs_root_id(sc->root)) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
		/*
		 * We can only tolerate ENOENT; otherwise, we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		struct btrfs_tree_parent_check check = { 0 };

		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		check.level = ref->level - 1;
		check.owner_root = ref->root_id;

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

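/*
 * A hedged note on the flow above: the refs parked in
 * indirect_missing_keys are exactly those added with key == NULL through
 * add_indirect_ref(). add_missing_keys() reads the first key of the
 * referenced block (item key for a leaf, node key otherwise) into
 * key_for_search and re-inserts the ref into the regular indirect tree,
 * so that resolve_indirect_refs() only ever sees refs carrying a usable
 * search key.
 */
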
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_key *key_ptr = NULL;
			/* The owner of a tree block ref is the level. */
			int level = btrfs_delayed_ref_owner(node);

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
					       key_ptr, level + 1, node->bytenr,
					       count, sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/*
			 * SHARED DIRECT METADATA backref
			 *
			 * The owner of a tree block ref is the level.
			 */
			int level = btrfs_delayed_ref_owner(node);

			ret = add_direct_ref(fs_info, preftrees, level + 1,
					     node->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			key.objectid = btrfs_delayed_ref_owner(node);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_delayed_ref_offset(node);

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_path *path,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

	if (ctx->check_extent_item) {
		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
		if (ret)
			return ret;
	}

	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(ctx->fs_info, preftrees,
					     *info_level + 1, offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
					     ctx->bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(ctx->fs_info, preftrees,
						       root, &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_EXTENT_OWNER_REF_KEY:
			ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
			break;
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
			  struct btrfs_root *extent_root,
			  struct btrfs_path *path,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != ctx->bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, ctx->bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, ctx->bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(fs_info, preftrees, root,
						       &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &ctx->path_cache_entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not shared
	 * because we have just switched from one leaf to another, and switched
	 * also other nodes above the leaf and below the current level, so mark
	 * them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			ctx->path_cache_entries[i].is_shared = true;
			ctx->path_cache_entries[i].gen = entry->gen;
		}
	}

	return true;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &ctx->path_cache_entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct: the refcount of
	 * the data extent is increased in its extent item in the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &ctx->path_cache_entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}

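/*
 * A small worked example of the generation checks above (a sketch under
 * the assumptions stated in the comments, not extra logic): suppose level
 * 2 is cached as not shared with gen taken from
 * btrfs_root_last_snapshot(), say 100. If a snapshot is created later,
 * last_snapshot moves past 100, the gen comparison in
 * lookup_backref_shared_cache() fails, and the stale "not shared" answer
 * is ignored. Symmetrically, a cached "shared" answer is dropped once
 * btrfs_get_last_root_drop_gen() advances past entry->gen, since the root
 * that shared this extent buffer may have been the one dropped.
 */
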
8da6d581 JS |
1364 | /* |
1365 | * this adds all existing backrefs (inline backrefs, backrefs and delayed | |
1366 | * refs) for the given bytenr to the refs list, merges duplicates and resolves | |
1367 | * indirect refs to their parent bytenr. | |
1368 | * When roots are found, they're added to the roots list | |
1369 | * | |
a2c8d27e FM |
1370 | * @ctx: Backref walking context object, must be not NULL. |
1371 | * @sc: If !NULL, then immediately return BACKREF_FOUND_SHARED when a | |
1372 | * shared extent is detected. | |
3ec4d323 EN |
1373 | * |
1374 | * Otherwise this returns 0 for success and <0 for an error. | |
1375 | * | |
8da6d581 JS |
1376 | * FIXME some caching might speed things up |
1377 | */ | |
a2c8d27e | 1378 | static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx, |
6ce6ba53 | 1379 | struct share_check *sc) |
8da6d581 | 1380 | { |
a2c8d27e | 1381 | struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr); |
8da6d581 JS |
1382 | struct btrfs_key key; |
1383 | struct btrfs_path *path; | |
8da6d581 | 1384 | struct btrfs_delayed_ref_root *delayed_refs = NULL; |
d3b01064 | 1385 | struct btrfs_delayed_ref_head *head; |
8da6d581 JS |
1386 | int info_level = 0; |
1387 | int ret; | |
e0c476b1 | 1388 | struct prelim_ref *ref; |
86d5f994 | 1389 | struct rb_node *node; |
f05c4746 | 1390 | struct extent_inode_elem *eie = NULL; |
86d5f994 EN |
1391 | struct preftrees preftrees = { |
1392 | .direct = PREFTREE_INIT, | |
1393 | .indirect = PREFTREE_INIT, | |
1394 | .indirect_missing_keys = PREFTREE_INIT | |
1395 | }; | |
8da6d581 | 1396 | |
56f5c199 FM |
1397 | /* Roots ulist is not needed when using a sharedness check context. */ |
1398 | if (sc) | |
a2c8d27e | 1399 | ASSERT(ctx->roots == NULL); |
56f5c199 | 1400 | |
a2c8d27e | 1401 | key.objectid = ctx->bytenr; |
a2c8d27e | 1402 | if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA)) |
261c84b6 JB |
1403 | key.type = BTRFS_METADATA_ITEM_KEY; |
1404 | else | |
1405 | key.type = BTRFS_EXTENT_ITEM_KEY; | |
dba6ae0b | 1406 | key.offset = (u64)-1; |
8da6d581 JS |
1407 | |
1408 | path = btrfs_alloc_path(); | |
1409 | if (!path) | |
1410 | return -ENOMEM; | |
a2c8d27e | 1411 | if (!ctx->trans) { |
da61d31a | 1412 | path->search_commit_root = 1; |
e84752d4 WS |
1413 | path->skip_locking = 1; |
1414 | } | |
8da6d581 | 1415 | |
a2c8d27e | 1416 | if (ctx->time_seq == BTRFS_SEQ_LAST) |
21633fc6 QW |
1417 | path->skip_locking = 1; |
1418 | ||
8da6d581 | 1419 | again: |
d3b01064 LZ |
1420 | head = NULL; |
1421 | ||
98cc4222 | 1422 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
8da6d581 JS |
1423 | if (ret < 0) |
1424 | goto out; | |
fcba0120 | 1425 | if (ret == 0) { |
5b957989 DS |
1426 | /* |
1427 | * Key with offset -1 found, there would have to exist an extent | |
1428 | * item with such an offset, but this is out of the valid range. |
1429 | */ | |
fcba0120 JB |
1430 | ret = -EUCLEAN; |
1431 | goto out; | |
1432 | } | |
8da6d581 | 1433 | |
a2c8d27e FM |
1434 | if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) && |
1435 | ctx->time_seq != BTRFS_SEQ_LAST) { | |
7a3ae2f8 | 1436 | /* |
9665ebd5 JB |
1437 | * We have a specific time_seq we care about and trans, which |
1438 | * means we have the path lock, so we need to grab the ref head |
1439 | * and lock it to have a consistent view of the refs at the given |
1440 | * time. | |
7a3ae2f8 | 1441 | */ |
a2c8d27e | 1442 | delayed_refs = &ctx->trans->transaction->delayed_refs; |
7a3ae2f8 | 1443 | spin_lock(&delayed_refs->lock); |
765f8289 FM |
1444 | head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs, |
1445 | ctx->bytenr); | |
7a3ae2f8 JS |
1446 | if (head) { |
1447 | if (!mutex_trylock(&head->mutex)) { | |
d278850e | 1448 | refcount_inc(&head->refs); |
7a3ae2f8 JS |
1449 | spin_unlock(&delayed_refs->lock); |
1450 | ||
1451 | btrfs_release_path(path); | |
1452 | ||
1453 | /* | |
1454 | * Mutex was contended, block until it's | |
1455 | * released and try again | |
1456 | */ | |
1457 | mutex_lock(&head->mutex); | |
1458 | mutex_unlock(&head->mutex); | |
d278850e | 1459 | btrfs_put_delayed_ref_head(head); |
7a3ae2f8 JS |
1460 | goto again; |
1461 | } | |
d7df2c79 | 1462 | spin_unlock(&delayed_refs->lock); |
a2c8d27e | 1463 | ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq, |
b25b0b87 | 1464 | &preftrees, sc); |
155725c9 | 1465 | mutex_unlock(&head->mutex); |
d7df2c79 | 1466 | if (ret) |
7a3ae2f8 | 1467 | goto out; |
d7df2c79 JB |
1468 | } else { |
1469 | spin_unlock(&delayed_refs->lock); | |
d3b01064 | 1470 | } |
8da6d581 | 1471 | } |
8da6d581 JS |
1472 | |
1473 | if (path->slots[0]) { | |
1474 | struct extent_buffer *leaf; | |
1475 | int slot; | |
1476 | ||
dadcaf78 | 1477 | path->slots[0]--; |
8da6d581 | 1478 | leaf = path->nodes[0]; |
dadcaf78 | 1479 | slot = path->slots[0]; |
8da6d581 | 1480 | btrfs_item_key_to_cpu(leaf, &key, slot); |
a2c8d27e | 1481 | if (key.objectid == ctx->bytenr && |
261c84b6 JB |
1482 | (key.type == BTRFS_EXTENT_ITEM_KEY || |
1483 | key.type == BTRFS_METADATA_ITEM_KEY)) { | |
f73853c7 FM |
1484 | ret = add_inline_refs(ctx, path, &info_level, |
1485 | &preftrees, sc); | |
8da6d581 JS |
1486 | if (ret) |
1487 | goto out; | |
adf02418 | 1488 | ret = add_keyed_refs(ctx, root, path, info_level, |
3ec4d323 | 1489 | &preftrees, sc); |
8da6d581 JS |
1490 | if (ret) |
1491 | goto out; | |
1492 | } | |
1493 | } | |
8da6d581 | 1494 | |
56f5c199 FM |
1495 | /* |
1496 | * If we have a share context and we reached here, it means the extent | |
1497 | * is not directly shared (no multiple reference items for it), | |
1498 | * otherwise we would have exited earlier with a return value of | |
1499 | * BACKREF_FOUND_SHARED after processing delayed references or while | |
1500 | * processing inline or keyed references from the extent tree. | |
1501 | * The extent may however be indirectly shared through shared subtrees | |
1502 | * as a result of creating snapshots, so we determine below what is |
1503 | * its parent node, in case we are dealing with a metadata extent, or | |
1504 | * what's the leaf (or leaves), from a fs tree, that has a file extent | |
1505 | * item pointing to it in case we are dealing with a data extent. | |
1506 | */ | |
1507 | ASSERT(extent_is_shared(sc) == 0); | |
1508 | ||
877c1476 FM |
1509 | /* |
1510 | * If we are here for a data extent and we have a share_check structure | |
1511 | * it means the data extent is not directly shared (does not have | |
1512 | * multiple reference items), so we have to check if a path in the fs | |
1513 | * tree (going from the root node down to the leaf that has the file | |
1514 | * extent item pointing to the data extent) is shared, that is, if any | |
1515 | * of the extent buffers in the path is referenced by other trees. | |
1516 | */ | |
a2c8d27e | 1517 | if (sc && ctx->bytenr == sc->data_bytenr) { |
6976201f FM |
1518 | /* |
1519 | * If our data extent is from a generation more recent than the | |
1520 | * last generation used to snapshot the root, then we know that | |
1521 | * it can not be shared through subtrees, so we can skip | |
1522 | * resolving indirect references, there's no point in | |
1523 | * determining the extent buffers for the path from the fs tree | |
1524 | * root node down to the leaf that has the file extent item that | |
1525 | * points to the data extent. | |
1526 | */ | |
1527 | if (sc->data_extent_gen > | |
1528 | btrfs_root_last_snapshot(&sc->root->root_item)) { | |
1529 | ret = BACKREF_FOUND_NOT_SHARED; | |
1530 | goto out; | |
1531 | } | |
1532 | ||
877c1476 FM |
1533 | /* |
1534 | * If we are only determining if a data extent is shared or not | |
1535 | * and the corresponding file extent item is located in the same | |
1536 | * leaf as the previous file extent item, we can skip resolving | |
1537 | * indirect references for a data extent, since the fs tree path | |
1538 | * is the same (same leaf, so same path). We skip as long as the | |
1539 | * cached result for the leaf is valid and only if there's only | |
1540 | * one file extent item pointing to the data extent, because in | |
1541 | * the case of multiple file extent items, they may be located | |
1542 | * in different leaves and therefore we have multiple paths. | |
1543 | */ | |
1544 | if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr && | |
1545 | sc->self_ref_count == 1) { | |
1546 | bool cached; | |
1547 | bool is_shared; | |
1548 | ||
1549 | cached = lookup_backref_shared_cache(sc->ctx, sc->root, | |
1550 | sc->ctx->curr_leaf_bytenr, | |
1551 | 0, &is_shared); | |
1552 | if (cached) { | |
1553 | if (is_shared) | |
1554 | ret = BACKREF_FOUND_SHARED; | |
1555 | else | |
1556 | ret = BACKREF_FOUND_NOT_SHARED; | |
1557 | goto out; | |
1558 | } | |
1559 | } | |
1560 | } | |
1561 | ||
86d5f994 | 1562 | btrfs_release_path(path); |
8da6d581 | 1563 | |
a2c8d27e | 1564 | ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0); |
d5c88b73 JS |
1565 | if (ret) |
1566 | goto out; | |
1567 | ||
ecf160b4 | 1568 | WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root)); |
8da6d581 | 1569 | |
a2c8d27e | 1570 | ret = resolve_indirect_refs(ctx, path, &preftrees, sc); |
8da6d581 JS |
1571 | if (ret) |
1572 | goto out; | |
1573 | ||
ecf160b4 | 1574 | WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root)); |
8da6d581 | 1575 | |
86d5f994 EN |
1576 | /* |
1577 | * This walks the tree of merged and resolved refs. Tree blocks are | |
1578 | * read in as needed. Unique entries are added to the ulist, and | |
1579 | * the list of found roots is updated. | |
1580 | * | |
1581 | * We release the entire tree in one go before returning. | |
1582 | */ | |
ecf160b4 | 1583 | node = rb_first_cached(&preftrees.direct.root); |
86d5f994 EN |
1584 | while (node) { |
1585 | ref = rb_entry(node, struct prelim_ref, rbnode); | |
1586 | node = rb_next(&ref->rbnode); | |
c8195a7b ZB |
1587 | /* |
1588 | * ref->count < 0 can happen here if there are delayed | |
1589 | * refs with a node->action of BTRFS_DROP_DELAYED_REF. | |
1590 | * prelim_ref_insert() relies on this when merging | |
1591 | * identical refs to keep the overall count correct. | |
1592 | * prelim_ref_insert() will merge only those refs | |
1593 | * which compare identically. Any refs having | |
1594 | * e.g. different offsets would not be merged, | |
1595 | * and would retain their original ref->count < 0. | |
1596 | */ | |
a2c8d27e | 1597 | if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) { |
8da6d581 | 1598 | /* no parent == root of tree */ |
a2c8d27e | 1599 | ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS); |
f1723939 WS |
1600 | if (ret < 0) |
1601 | goto out; | |
8da6d581 JS |
1602 | } |
1603 | if (ref->count && ref->parent) { | |
0cad8f14 | 1604 | if (!ctx->skip_inode_ref_list && !ref->inode_list && |
a2c8d27e | 1605 | ref->level == 0) { |
789d6a3a | 1606 | struct btrfs_tree_parent_check check = { 0 }; |
976b1908 | 1607 | struct extent_buffer *eb; |
707e8a07 | 1608 | |
789d6a3a QW |
1609 | check.level = ref->level; |
1610 | ||
1611 | eb = read_tree_block(ctx->fs_info, ref->parent, | |
1612 | &check); | |
64c043de LB |
1613 | if (IS_ERR(eb)) { |
1614 | ret = PTR_ERR(eb); | |
1615 | goto out; | |
4eb150d6 QW |
1616 | } |
1617 | if (!extent_buffer_uptodate(eb)) { | |
416bc658 | 1618 | free_extent_buffer(eb); |
c16c2e2e WS |
1619 | ret = -EIO; |
1620 | goto out; | |
416bc658 | 1621 | } |
38e3eebf | 1622 | |
ac5887c8 | 1623 | if (!path->skip_locking) |
38e3eebf | 1624 | btrfs_tree_read_lock(eb); |
88ffb665 | 1625 | ret = find_extent_in_eb(ctx, eb, &eie); |
38e3eebf | 1626 | if (!path->skip_locking) |
ac5887c8 | 1627 | btrfs_tree_read_unlock(eb); |
976b1908 | 1628 | free_extent_buffer(eb); |
88ffb665 FM |
1629 | if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || |
1630 | ret < 0) | |
f5929cd8 FDBM |
1631 | goto out; |
1632 | ref->inode_list = eie; | |
92876eec FM |
1633 | /* |
1634 | * We transferred the list ownership to the ref, | |
1635 | * so set to NULL to avoid a double free in case | |
1636 | * an error happens after this. | |
1637 | */ | |
1638 | eie = NULL; | |
976b1908 | 1639 | } |
a2c8d27e | 1640 | ret = ulist_add_merge_ptr(ctx->refs, ref->parent, |
4eb1f66d TI |
1641 | ref->inode_list, |
1642 | (void **)&eie, GFP_NOFS); | |
f1723939 WS |
1643 | if (ret < 0) |
1644 | goto out; | |
0cad8f14 | 1645 | if (!ret && !ctx->skip_inode_ref_list) { |
3301958b | 1646 | /* |
9f05c09d JB |
1647 | * We've recorded that parent, so we must extend |
1648 | * its inode list here. | |
1649 | * | |
1650 | * However, if there was corruption we may not |
1651 | * have found an eie; return an error in this |
1652 | * case. | |
3301958b | 1653 | */ |
9f05c09d JB |
1654 | ASSERT(eie); |
1655 | if (!eie) { | |
1656 | ret = -EUCLEAN; | |
1657 | goto out; | |
1658 | } | |
3301958b JS |
1659 | while (eie->next) |
1660 | eie = eie->next; | |
1661 | eie->next = ref->inode_list; | |
1662 | } | |
f05c4746 | 1663 | eie = NULL; |
92876eec FM |
1664 | /* |
1665 | * We have transferred the inode list ownership from | |
1666 | * this ref to the ref we added to the 'refs' ulist. | |
1667 | * So set this ref's inode list to NULL to avoid | |
1668 | * use-after-free when our caller uses it or double | |
1669 | * frees in case an error happens before we return. | |
1670 | */ | |
1671 | ref->inode_list = NULL; | |
8da6d581 | 1672 | } |
9dd14fd6 | 1673 | cond_resched(); |
8da6d581 JS |
1674 | } |
1675 | ||
1676 | out: | |
8da6d581 | 1677 | btrfs_free_path(path); |
86d5f994 EN |
1678 | |
1679 | prelim_release(&preftrees.direct); | |
1680 | prelim_release(&preftrees.indirect); | |
1681 | prelim_release(&preftrees.indirect_missing_keys); | |
1682 | ||
88ffb665 | 1683 | if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0) |
f05c4746 | 1684 | free_inode_elem_list(eie); |
8da6d581 JS |
1685 | return ret; |
1686 | } | |
1687 | ||
1688 | /* | |
a2c8d27e FM |
1689 | * Finds all leaves with a reference to the specified combination of |
1690 | * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves |
1691 | * are added to the ulist at @ctx->refs, and that ulist is allocated by this |
1692 | * function. The caller should free the ulist with free_leaf_list() if |
1693 | * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is |
1694 | * enough. | |
8da6d581 | 1695 | * |
a2c8d27e | 1696 | * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated. |
8da6d581 | 1697 | */ |
a2c8d27e | 1698 | int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx) |
8da6d581 | 1699 | { |
8da6d581 JS |
1700 | int ret; |
1701 | ||
a2c8d27e FM |
1702 | ASSERT(ctx->refs == NULL); |
1703 | ||
1704 | ctx->refs = ulist_alloc(GFP_NOFS); | |
1705 | if (!ctx->refs) | |
8da6d581 | 1706 | return -ENOMEM; |
8da6d581 | 1707 | |
a2c8d27e | 1708 | ret = find_parent_nodes(ctx, NULL); |
88ffb665 FM |
1709 | if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || |
1710 | (ret < 0 && ret != -ENOENT)) { | |
a2c8d27e FM |
1711 | free_leaf_list(ctx->refs); |
1712 | ctx->refs = NULL; | |
8da6d581 JS |
1713 | return ret; |
1714 | } | |
1715 | ||
1716 | return 0; | |
1717 | } | |
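
/*
 * Usage sketch (hypothetical helper, not part of the kernel sources): a
 * minimal caller of btrfs_find_all_leafs(). The caller is assumed to hold
 * fs_info->commit_root_sem (or to pass a transaction in the context), as
 * iterate_extent_inodes() below does. With ignore_extent_item_pos set, a
 * plain ulist_free() is enough to release the result, per the comment above.
 */
static int example_collect_leaves(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = bytenr;
	walk_ctx.ignore_extent_item_pos = true;

	ret = btrfs_find_all_leafs(&walk_ctx);
	if (ret)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(walk_ctx.refs, &uiter)))
		btrfs_debug(fs_info, "leaf %llu references extent %llu",
			    node->val, bytenr);

	ulist_free(walk_ctx.refs);
	return 0;
}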
1718 | ||
1719 | /* | |
a2c8d27e | 1720 | * Walk all backrefs for a given extent to find all roots that reference this |
8da6d581 JS |
1721 | * extent. Walking a backref means finding all extents that reference this |
1722 | * extent and in turn walk the backrefs of those, too. Naturally this is a | |
1723 | * recursive process, but here it is implemented in an iterative fashion: We | |
1724 | * find all referencing extents for the extent in question and put them on a | |
1725 | * list. In turn, we find all referencing extents for those, further appending | |
1726 | * to the list. The way we iterate the list allows adding more elements after | |
1727 | * the current one while iterating. The process stops when we reach the end of the |
a2c8d27e FM |
1728 | * list. |
1729 | * | |
1baea6f1 FM |
1730 | * Found roots are added to @ctx->roots, which is allocated by this function if |
1731 | * it points to NULL, in which case the caller is responsible for freeing it | |
1732 | * after it's not needed anymore. | |
1733 | * This function requires @ctx->refs to be NULL, as it uses it for allocating a | |
1734 | * ulist to do temporary work, and frees it before returning. | |
8da6d581 | 1735 | * |
1baea6f1 | 1736 | * Returns 0 on success, < 0 on error. |
8da6d581 | 1737 | */ |
a2c8d27e | 1738 | static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx) |
8da6d581 | 1739 | { |
a2c8d27e | 1740 | const u64 orig_bytenr = ctx->bytenr; |
0cad8f14 | 1741 | const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list; |
1baea6f1 | 1742 | bool roots_ulist_allocated = false; |
cd1b413c | 1743 | struct ulist_iterator uiter; |
a2c8d27e FM |
1744 | int ret = 0; |
1745 | ||
1746 | ASSERT(ctx->refs == NULL); | |
8da6d581 | 1747 | |
a2c8d27e FM |
1748 | ctx->refs = ulist_alloc(GFP_NOFS); |
1749 | if (!ctx->refs) | |
8da6d581 | 1750 | return -ENOMEM; |
a2c8d27e | 1751 | |
a2c8d27e | 1752 | if (!ctx->roots) { |
1baea6f1 FM |
1753 | ctx->roots = ulist_alloc(GFP_NOFS); |
1754 | if (!ctx->roots) { | |
1755 | ulist_free(ctx->refs); | |
1756 | ctx->refs = NULL; | |
1757 | return -ENOMEM; | |
1758 | } | |
1759 | roots_ulist_allocated = true; | |
8da6d581 JS |
1760 | } |
1761 | ||
0cad8f14 | 1762 | ctx->skip_inode_ref_list = true; |
a2c8d27e | 1763 | |
cd1b413c | 1764 | ULIST_ITER_INIT(&uiter); |
8da6d581 | 1765 | while (1) { |
a2c8d27e FM |
1766 | struct ulist_node *node; |
1767 | ||
1768 | ret = find_parent_nodes(ctx, NULL); | |
8da6d581 | 1769 | if (ret < 0 && ret != -ENOENT) { |
1baea6f1 FM |
1770 | if (roots_ulist_allocated) { |
1771 | ulist_free(ctx->roots); | |
1772 | ctx->roots = NULL; | |
1773 | } | |
a2c8d27e | 1774 | break; |
8da6d581 | 1775 | } |
a2c8d27e FM |
1776 | ret = 0; |
1777 | node = ulist_next(ctx->refs, &uiter); | |
8da6d581 JS |
1778 | if (!node) |
1779 | break; | |
a2c8d27e | 1780 | ctx->bytenr = node->val; |
bca1a290 | 1781 | cond_resched(); |
8da6d581 JS |
1782 | } |
1783 | ||
a2c8d27e FM |
1784 | ulist_free(ctx->refs); |
1785 | ctx->refs = NULL; | |
1786 | ctx->bytenr = orig_bytenr; | |
0cad8f14 | 1787 | ctx->skip_inode_ref_list = orig_skip_inode_ref_list; |
a2c8d27e FM |
1788 | |
1789 | return ret; | |
8da6d581 JS |
1790 | } |
1791 | ||
a2c8d27e | 1792 | int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx, |
c7bcbb21 | 1793 | bool skip_commit_root_sem) |
9e351cc8 JB |
1794 | { |
1795 | int ret; | |
1796 | ||
a2c8d27e FM |
1797 | if (!ctx->trans && !skip_commit_root_sem) |
1798 | down_read(&ctx->fs_info->commit_root_sem); | |
1799 | ret = btrfs_find_all_roots_safe(ctx); | |
1800 | if (!ctx->trans && !skip_commit_root_sem) | |
1801 | up_read(&ctx->fs_info->commit_root_sem); | |
9e351cc8 JB |
1802 | return ret; |
1803 | } | |
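
/*
 * Usage sketch (hypothetical): resolving every root that references a given
 * extent with btrfs_find_all_roots(). With no transaction in the context and
 * skip_commit_root_sem == false, the helper takes the commit root semaphore
 * itself.
 */
static int example_print_roots(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = bytenr;

	ret = btrfs_find_all_roots(&walk_ctx, false);
	if (ret)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(walk_ctx.roots, &uiter)))
		btrfs_debug(fs_info, "root %llu references extent %llu",
			    node->val, bytenr);

	ulist_free(walk_ctx.roots);
	return 0;
}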
1804 | ||
84a7949d FM |
1805 | struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void) |
1806 | { | |
1807 | struct btrfs_backref_share_check_ctx *ctx; | |
1808 | ||
1809 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | |
1810 | if (!ctx) | |
1811 | return NULL; | |
1812 | ||
1813 | ulist_init(&ctx->refs); | |
84a7949d FM |
1814 | |
1815 | return ctx; | |
1816 | } | |
1817 | ||
1818 | void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx) | |
1819 | { | |
1820 | if (!ctx) | |
1821 | return; | |
1822 | ||
1823 | ulist_release(&ctx->refs); | |
84a7949d FM |
1824 | kfree(ctx); |
1825 | } | |
1826 | ||
8eedadda FM |
1827 | /* |
1828 | * Check if a data extent is shared or not. | |
6e353e3b | 1829 | * |
ceb707da | 1830 | * @inode: The inode whose extent we are checking. |
b8f164e3 FM |
1831 | * @bytenr: Logical bytenr of the extent we are checking. |
1832 | * @extent_gen: Generation of the extent (file extent item) or 0 if it is | |
1833 | * not known. | |
61dbb952 | 1834 | * @ctx: A backref sharedness check context. |
2c2ed5aa | 1835 | * |
8eedadda | 1836 | * btrfs_is_data_extent_shared uses the backref walking code but will short |
2c2ed5aa MF |
1837 | * circuit as soon as it finds a root or inode that doesn't match the |
1838 | * one passed in. This provides a significant performance benefit for | |
1839 | * callers (such as fiemap) which want to know whether the extent is | |
1840 | * shared but do not need a ref count. | |
1841 | * | |
03628cdb FM |
1842 | * This attempts to attach to the running transaction in order to account for |
1843 | * delayed refs, but continues on even when no running transaction exists. | |
bb739cf0 | 1844 | * |
2c2ed5aa MF |
1845 | * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. |
1846 | */ | |
ceb707da | 1847 | int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr, |
b8f164e3 | 1848 | u64 extent_gen, |
61dbb952 | 1849 | struct btrfs_backref_share_check_ctx *ctx) |
dc046b10 | 1850 | { |
a2c8d27e | 1851 | struct btrfs_backref_walk_ctx walk_ctx = { 0 }; |
ceb707da | 1852 | struct btrfs_root *root = inode->root; |
bb739cf0 EN |
1853 | struct btrfs_fs_info *fs_info = root->fs_info; |
1854 | struct btrfs_trans_handle *trans; | |
dc046b10 JB |
1855 | struct ulist_iterator uiter; |
1856 | struct ulist_node *node; | |
f3a84ccd | 1857 | struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem); |
dc046b10 | 1858 | int ret = 0; |
3ec4d323 | 1859 | struct share_check shared = { |
877c1476 FM |
1860 | .ctx = ctx, |
1861 | .root = root, | |
ceb707da | 1862 | .inum = btrfs_ino(inode), |
73e339e6 | 1863 | .data_bytenr = bytenr, |
6976201f | 1864 | .data_extent_gen = extent_gen, |
3ec4d323 | 1865 | .share_count = 0, |
73e339e6 | 1866 | .self_ref_count = 0, |
4fc7b572 | 1867 | .have_delayed_delete_refs = false, |
3ec4d323 | 1868 | }; |
12a824dc | 1869 | int level; |
e2fd8306 FM |
1870 | bool leaf_cached; |
1871 | bool leaf_is_shared; | |
dc046b10 | 1872 | |
73e339e6 FM |
1873 | for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) { |
1874 | if (ctx->prev_extents_cache[i].bytenr == bytenr) | |
1875 | return ctx->prev_extents_cache[i].is_shared; | |
1876 | } | |
1877 | ||
84a7949d | 1878 | ulist_init(&ctx->refs); |
dc046b10 | 1879 | |
a6d155d2 | 1880 | trans = btrfs_join_transaction_nostart(root); |
bb739cf0 | 1881 | if (IS_ERR(trans)) { |
03628cdb FM |
1882 | if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { |
1883 | ret = PTR_ERR(trans); | |
1884 | goto out; | |
1885 | } | |
bb739cf0 | 1886 | trans = NULL; |
dc046b10 | 1887 | down_read(&fs_info->commit_root_sem); |
bb739cf0 EN |
1888 | } else { |
1889 | btrfs_get_tree_mod_seq(fs_info, &elem); | |
a2c8d27e | 1890 | walk_ctx.time_seq = elem.seq; |
bb739cf0 EN |
1891 | } |
1892 | ||
e2fd8306 FM |
1893 | ctx->use_path_cache = true; |
1894 | ||
1895 | /* | |
1896 | * We may have previously determined that the current leaf is shared. | |
1897 | * If it is, then we have a data extent that is shared due to a shared | |
1898 | * subtree (caused by snapshotting) and we don't need to check for data | |
1899 | * backrefs. If the leaf is not shared, then we must do backref walking | |
1900 | * to determine if the data extent is shared through reflinks. | |
1901 | */ | |
1902 | leaf_cached = lookup_backref_shared_cache(ctx, root, | |
1903 | ctx->curr_leaf_bytenr, 0, | |
1904 | &leaf_is_shared); | |
1905 | if (leaf_cached && leaf_is_shared) { | |
1906 | ret = 1; | |
1907 | goto out_trans; | |
1908 | } | |
1909 | ||
0cad8f14 | 1910 | walk_ctx.skip_inode_ref_list = true; |
a2c8d27e FM |
1911 | walk_ctx.trans = trans; |
1912 | walk_ctx.fs_info = fs_info; | |
1913 | walk_ctx.refs = &ctx->refs; | |
1914 | ||
12a824dc FM |
1915 | /* -1 means we are in the bytenr of the data extent. */ |
1916 | level = -1; | |
dc046b10 JB |
1917 | ULIST_ITER_INIT(&uiter); |
1918 | while (1) { | |
2280d425 | 1919 | const unsigned long prev_ref_count = ctx->refs.nnodes; |
12a824dc | 1920 | |
a2c8d27e FM |
1921 | walk_ctx.bytenr = bytenr; |
1922 | ret = find_parent_nodes(&walk_ctx, &shared); | |
877c1476 FM |
1923 | if (ret == BACKREF_FOUND_SHARED || |
1924 | ret == BACKREF_FOUND_NOT_SHARED) { | |
1925 | /* If shared must return 1, otherwise return 0. */ | |
1926 | ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0; | |
12a824dc | 1927 | if (level >= 0) |
61dbb952 | 1928 | store_backref_shared_cache(ctx, root, bytenr, |
877c1476 | 1929 | level, ret == 1); |
dc046b10 JB |
1930 | break; |
1931 | } | |
1932 | if (ret < 0 && ret != -ENOENT) | |
1933 | break; | |
2c2ed5aa | 1934 | ret = 0; |
b8f164e3 | 1935 | |
63c84b46 | 1936 | /* |
2280d425 FM |
1937 | * More than one extent buffer (bytenr) may have been added to |
1938 | * the ctx->refs ulist, in which case we have to check multiple | |
1939 | * tree paths in case the first one is not shared, so we can not | |
1940 | * use the path cache which is made for a single path. Multiple | |
1941 | * extent buffers at the current level happen when: | |
1942 | * | |
1943 | * 1) level -1, the data extent: If our data extent was not | |
1944 | * directly shared (without multiple reference items), then | |
1945 | * it might have a single reference item with a count > 1 for | |
1946 | * the same offset, which means there are 2 (or more) file | |
1947 | * extent items that point to the data extent - this happens | |
1948 | * when a file extent item needs to be split and then one | |
1949 | * item gets moved to another leaf due to a b+tree leaf split | |
1950 | * when inserting some item. In this case the file extent | |
1951 | * items may be located in different leaves and therefore | |
1952 | * some of the leaves may be referenced through shared | |
1953 | * subtrees while others are not. Since our extent buffer | |
1954 | * cache only works for a single path (by far the most common | |
1955 | * case and simpler to deal with), we can not use it if we | |
1956 | * have multiple leaves (which implies multiple paths). | |
1957 | * | |
1958 | * 2) level >= 0, a tree node/leaf: We can have a mix of direct | |
1959 | * and indirect references on a b+tree node/leaf, so we have | |
1960 | * to check multiple paths, and the extent buffer (the | |
1961 | * current bytenr) may be shared or not. One example is | |
1962 | * during relocation as we may get a shared tree block ref | |
1963 | * (direct ref) and a non-shared tree block ref (indirect | |
1964 | * ref) for the same node/leaf. | |
63c84b46 | 1965 | */ |
2280d425 | 1966 | if ((ctx->refs.nnodes - prev_ref_count) > 1) |
61dbb952 | 1967 | ctx->use_path_cache = false; |
63c84b46 | 1968 | |
12a824dc | 1969 | if (level >= 0) |
61dbb952 | 1970 | store_backref_shared_cache(ctx, root, bytenr, |
12a824dc | 1971 | level, false); |
84a7949d | 1972 | node = ulist_next(&ctx->refs, &uiter); |
dc046b10 JB |
1973 | if (!node) |
1974 | break; | |
1975 | bytenr = node->val; | |
2280d425 FM |
1976 | if (ctx->use_path_cache) { |
1977 | bool is_shared; | |
1978 | bool cached; | |
1979 | ||
1980 | level++; | |
1981 | cached = lookup_backref_shared_cache(ctx, root, bytenr, | |
1982 | level, &is_shared); | |
1983 | if (cached) { | |
1984 | ret = (is_shared ? 1 : 0); | |
1985 | break; | |
1986 | } | |
12a824dc | 1987 | } |
18bf591b | 1988 | shared.share_count = 0; |
4fc7b572 | 1989 | shared.have_delayed_delete_refs = false; |
dc046b10 JB |
1990 | cond_resched(); |
1991 | } | |
bb739cf0 | 1992 | |
2280d425 FM |
1993 | /* |
1994 | * If the path cache is disabled, then it means at some tree level we | |
1995 | * got multiple parents due to a mix of direct and indirect backrefs or | |
1996 | * multiple leaves with file extent items pointing to the same data | |
1997 | * extent. We have to invalidate the cache and cache only the sharedness | |
1998 | * result for the levels where we got only one node/reference. | |
1999 | */ | |
2000 | if (!ctx->use_path_cache) { | |
2001 | int i = 0; | |
2002 | ||
2003 | level--; | |
2004 | if (ret >= 0 && level >= 0) { | |
2005 | bytenr = ctx->path_cache_entries[level].bytenr; | |
2006 | ctx->use_path_cache = true; | |
2007 | store_backref_shared_cache(ctx, root, bytenr, level, ret); | |
2008 | i = level + 1; | |
2009 | } | |
2010 | ||
2011 | for ( ; i < BTRFS_MAX_LEVEL; i++) | |
2012 | ctx->path_cache_entries[i].bytenr = 0; | |
2013 | } | |
2014 | ||
73e339e6 FM |
2015 | /* |
2016 | * Cache the sharedness result for the data extent if we know our inode | |
2017 | * has more than 1 file extent item that refers to the data extent. | |
2018 | */ | |
2019 | if (ret >= 0 && shared.self_ref_count > 1) { | |
2020 | int slot = ctx->prev_extents_cache_slot; | |
2021 | ||
2022 | ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr; | |
2023 | ctx->prev_extents_cache[slot].is_shared = (ret == 1); | |
2024 | ||
2025 | slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; | |
2026 | ctx->prev_extents_cache_slot = slot; | |
2027 | } | |
2028 | ||
e2fd8306 | 2029 | out_trans: |
bb739cf0 | 2030 | if (trans) { |
dc046b10 | 2031 | btrfs_put_tree_mod_seq(fs_info, &elem); |
bb739cf0 EN |
2032 | btrfs_end_transaction(trans); |
2033 | } else { | |
dc046b10 | 2034 | up_read(&fs_info->commit_root_sem); |
bb739cf0 | 2035 | } |
03628cdb | 2036 | out: |
84a7949d | 2037 | ulist_release(&ctx->refs); |
877c1476 FM |
2038 | ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr; |
2039 | ||
dc046b10 JB |
2040 | return ret; |
2041 | } | |
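
/*
 * Usage sketch (hypothetical, fiemap-like caller): the sharedness check
 * context is allocated once and reused across consecutive extents of the
 * same inode, so the path cache and the previous-extents cache can take
 * effect. A real caller (fiemap) also keeps ctx->curr_leaf_bytenr up to
 * date while walking the file extent items.
 */
static int example_extent_shared(struct btrfs_inode *inode, u64 bytenr,
				 u64 extent_gen)
{
	struct btrfs_backref_share_check_ctx *ctx;
	int ret;

	ctx = btrfs_alloc_backref_share_check_ctx();
	if (!ctx)
		return -ENOMEM;

	/* Returns 1 if shared, 0 if not shared, < 0 on error. */
	ret = btrfs_is_data_extent_shared(inode, bytenr, extent_gen, ctx);

	btrfs_free_backref_share_ctx(ctx);
	return ret;
}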
2042 | ||
f186373f MF |
2043 | int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, |
2044 | u64 start_off, struct btrfs_path *path, | |
2045 | struct btrfs_inode_extref **ret_extref, | |
2046 | u64 *found_off) | |
2047 | { | |
2048 | int ret, slot; | |
2049 | struct btrfs_key key; | |
2050 | struct btrfs_key found_key; | |
2051 | struct btrfs_inode_extref *extref; | |
73980bec | 2052 | const struct extent_buffer *leaf; |
f186373f MF |
2053 | unsigned long ptr; |
2054 | ||
2055 | key.objectid = inode_objectid; | |
962a298f | 2056 | key.type = BTRFS_INODE_EXTREF_KEY; |
f186373f MF |
2057 | key.offset = start_off; |
2058 | ||
2059 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2060 | if (ret < 0) | |
2061 | return ret; | |
2062 | ||
2063 | while (1) { | |
2064 | leaf = path->nodes[0]; | |
2065 | slot = path->slots[0]; | |
2066 | if (slot >= btrfs_header_nritems(leaf)) { | |
2067 | /* | |
2068 | * If the item at offset is not found, | |
2069 | * btrfs_search_slot will point us to the slot | |
2070 | * where it should be inserted. In our case | |
2071 | * that will be the slot directly before the | |
2072 | * next INODE_EXTREF_KEY item. In the case |
2073 | * that we're pointing to the last slot in a | |
2074 | * leaf, we must move one leaf over. | |
2075 | */ | |
2076 | ret = btrfs_next_leaf(root, path); | |
2077 | if (ret) { | |
2078 | if (ret >= 1) | |
2079 | ret = -ENOENT; | |
2080 | break; | |
2081 | } | |
2082 | continue; | |
2083 | } | |
2084 | ||
2085 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
2086 | ||
2087 | /* | |
2088 | * Check that we're still looking at an extended ref key for | |
2089 | * this particular objectid. If we have a different |
2090 | * objectid or type then there are no more to be found | |
2091 | * in the tree and we can exit. | |
2092 | */ | |
2093 | ret = -ENOENT; | |
2094 | if (found_key.objectid != inode_objectid) | |
2095 | break; | |
962a298f | 2096 | if (found_key.type != BTRFS_INODE_EXTREF_KEY) |
f186373f MF |
2097 | break; |
2098 | ||
2099 | ret = 0; | |
2100 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
2101 | extref = (struct btrfs_inode_extref *)ptr; | |
2102 | *ret_extref = extref; | |
2103 | if (found_off) | |
2104 | *found_off = found_key.offset; | |
2105 | break; | |
2106 | } | |
2107 | ||
2108 | return ret; | |
2109 | } | |
2110 | ||
48a3b636 ES |
2111 | /* |
2112 | * this iterates to turn a name (from iref/extref) into a full filesystem path. | |
2113 | * Elements of the path are separated by '/' and the path is guaranteed to be | |
2114 | * 0-terminated. the path is only given within the current file system. | |
2115 | * Therefore, it never starts with a '/'. the caller is responsible for |
2116 | * providing "size" bytes in "dest". the dest buffer will be filled backwards. |
2117 | * the start point of the resulting string is returned. this pointer is within | |
2118 | * dest, normally. | |
2119 | * in case the path buffer would overflow, the pointer is decremented further | |
2120 | * as if output was written to the buffer, though no more output is actually | |
2121 | * generated. that way, the caller can determine how much space would be | |
2122 | * required for the path to fit into the buffer. in that case, the returned | |
2123 | * value will be smaller than dest. callers must check this! | |
2124 | */ | |
96b5bd77 JS |
2125 | char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, |
2126 | u32 name_len, unsigned long name_off, | |
2127 | struct extent_buffer *eb_in, u64 parent, | |
2128 | char *dest, u32 size) | |
a542ad1b | 2129 | { |
a542ad1b JS |
2130 | int slot; |
2131 | u64 next_inum; | |
2132 | int ret; | |
661bec6b | 2133 | s64 bytes_left = ((s64)size) - 1; |
a542ad1b JS |
2134 | struct extent_buffer *eb = eb_in; |
2135 | struct btrfs_key found_key; | |
d24bec3a | 2136 | struct btrfs_inode_ref *iref; |
a542ad1b JS |
2137 | |
2138 | if (bytes_left >= 0) | |
2139 | dest[bytes_left] = '\0'; | |
2140 | ||
2141 | while (1) { | |
d24bec3a | 2142 | bytes_left -= name_len; |
a542ad1b JS |
2143 | if (bytes_left >= 0) |
2144 | read_extent_buffer(eb, dest + bytes_left, | |
d24bec3a | 2145 | name_off, name_len); |
b916a59a | 2146 | if (eb != eb_in) { |
0c0fe3b0 | 2147 | if (!path->skip_locking) |
ac5887c8 | 2148 | btrfs_tree_read_unlock(eb); |
a542ad1b | 2149 | free_extent_buffer(eb); |
b916a59a | 2150 | } |
c234a24d DS |
2151 | ret = btrfs_find_item(fs_root, path, parent, 0, |
2152 | BTRFS_INODE_REF_KEY, &found_key); | |
8f24b496 JS |
2153 | if (ret > 0) |
2154 | ret = -ENOENT; | |
a542ad1b JS |
2155 | if (ret) |
2156 | break; | |
d24bec3a | 2157 | |
a542ad1b JS |
2158 | next_inum = found_key.offset; |
2159 | ||
2160 | /* regular exit ahead */ | |
2161 | if (parent == next_inum) | |
2162 | break; | |
2163 | ||
2164 | slot = path->slots[0]; | |
2165 | eb = path->nodes[0]; | |
2166 | /* make sure we can use eb after releasing the path */ | |
b916a59a | 2167 | if (eb != eb_in) { |
0c0fe3b0 FM |
2168 | path->nodes[0] = NULL; |
2169 | path->locks[0] = 0; | |
b916a59a | 2170 | } |
a542ad1b | 2171 | btrfs_release_path(path); |
a542ad1b | 2172 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); |
d24bec3a MF |
2173 | |
2174 | name_len = btrfs_inode_ref_name_len(eb, iref); | |
2175 | name_off = (unsigned long)(iref + 1); | |
2176 | ||
a542ad1b JS |
2177 | parent = next_inum; |
2178 | --bytes_left; | |
2179 | if (bytes_left >= 0) | |
2180 | dest[bytes_left] = '/'; | |
2181 | } | |
2182 | ||
2183 | btrfs_release_path(path); | |
2184 | ||
2185 | if (ret) | |
2186 | return ERR_PTR(ret); | |
2187 | ||
2188 | return dest + bytes_left; | |
2189 | } | |
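
/*
 * Sketch of the overflow check described above (hypothetical caller): the
 * buffer is filled backwards, so a return value below @dest means the path
 * was truncated, and the distance tells us how much space a full copy
 * would have needed.
 */
static void example_check_path_fit(struct btrfs_fs_info *fs_info,
				   char *path_start, char *dest, u32 size)
{
	if (path_start < dest)
		btrfs_debug(fs_info, "path truncated, %llu bytes needed",
			    (u64)(dest - path_start) + size);
	else
		btrfs_debug(fs_info, "resolved path: %s", path_start);
}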
2190 | ||
2191 | /* | |
2192 | * this makes the path point to (logical EXTENT_ITEM *). it sets *flags_ret |
2193 | * to BTRFS_EXTENT_FLAG_DATA for data or BTRFS_EXTENT_FLAG_TREE_BLOCK for |
2194 | * tree blocks, and returns 0 on success or <0 on error. |
2195 | */ | |
2196 | int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, | |
69917e43 LB |
2197 | struct btrfs_path *path, struct btrfs_key *found_key, |
2198 | u64 *flags_ret) | |
a542ad1b | 2199 | { |
29cbcf40 | 2200 | struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical); |
a542ad1b JS |
2201 | int ret; |
2202 | u64 flags; | |
261c84b6 | 2203 | u64 size = 0; |
a542ad1b | 2204 | u32 item_size; |
73980bec | 2205 | const struct extent_buffer *eb; |
a542ad1b JS |
2206 | struct btrfs_extent_item *ei; |
2207 | struct btrfs_key key; | |
2208 | ||
dba6ae0b | 2209 | key.objectid = logical; |
261c84b6 JB |
2210 | if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) |
2211 | key.type = BTRFS_METADATA_ITEM_KEY; | |
2212 | else | |
2213 | key.type = BTRFS_EXTENT_ITEM_KEY; | |
a542ad1b JS |
2214 | key.offset = (u64)-1; |
2215 | ||
29cbcf40 | 2216 | ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); |
a542ad1b JS |
2217 | if (ret < 0) |
2218 | return ret; | |
11dcc86e DS |
2219 | if (ret == 0) { |
2220 | /* | |
2221 | * Key with offset -1 found, there would have to exist an extent | |
2222 | * item with such an offset, but this is out of the valid range. |
2223 | */ | |
2224 | return -EUCLEAN; | |
2225 | } | |
a542ad1b | 2226 | |
29cbcf40 | 2227 | ret = btrfs_previous_extent_item(extent_root, path, 0); |
850a8cdf WS |
2228 | if (ret) { |
2229 | if (ret > 0) | |
2230 | ret = -ENOENT; | |
2231 | return ret; | |
580f0a67 | 2232 | } |
850a8cdf | 2233 | btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); |
261c84b6 | 2234 | if (found_key->type == BTRFS_METADATA_ITEM_KEY) |
da17066c | 2235 | size = fs_info->nodesize; |
261c84b6 JB |
2236 | else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) |
2237 | size = found_key->offset; | |
2238 | ||
580f0a67 | 2239 | if (found_key->objectid > logical || |
261c84b6 | 2240 | found_key->objectid + size <= logical) { |
ab8d0fc4 JM |
2241 | btrfs_debug(fs_info, |
2242 | "logical %llu is not within any extent", logical); | |
a542ad1b | 2243 | return -ENOENT; |
4692cf58 | 2244 | } |
a542ad1b JS |
2245 | |
2246 | eb = path->nodes[0]; | |
3212fa14 | 2247 | item_size = btrfs_item_size(eb, path->slots[0]); |
a542ad1b JS |
2248 | |
2249 | ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); | |
2250 | flags = btrfs_extent_flags(eb, ei); | |
2251 | ||
ab8d0fc4 JM |
2252 | btrfs_debug(fs_info, |
2253 | "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u", | |
c1c9ff7c GU |
2254 | logical, logical - found_key->objectid, found_key->objectid, |
2255 | found_key->offset, flags, item_size); | |
69917e43 LB |
2256 | |
2257 | WARN_ON(!flags_ret); | |
2258 | if (flags_ret) { | |
2259 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) | |
2260 | *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK; | |
2261 | else if (flags & BTRFS_EXTENT_FLAG_DATA) | |
2262 | *flags_ret = BTRFS_EXTENT_FLAG_DATA; | |
2263 | else | |
290342f6 | 2264 | BUG(); |
69917e43 LB |
2265 | return 0; |
2266 | } | |
a542ad1b JS |
2267 | |
2268 | return -EIO; | |
2269 | } | |
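
/*
 * Usage sketch (hypothetical): resolving a logical address to its extent
 * item and classifying it, much as iterate_inodes_from_logical() does below
 * for the data case.
 */
static int example_classify_logical(struct btrfs_fs_info *fs_info,
				    u64 logical, struct btrfs_path *path)
{
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		btrfs_debug(fs_info, "logical %llu is a tree block", logical);
	else
		btrfs_debug(fs_info, "logical %llu is data at extent offset %llu",
			    logical, logical - found_key.objectid);
	return 0;
}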
2270 | ||
2271 | /* | |
2272 | * helper function to iterate extent inline refs. ptr must point to a 0 value | |
2273 | * for the first call and may be modified. it is used to track state. | |
2274 | * if more refs exist, 0 is returned and the next call to | |
e0c476b1 | 2275 | * get_extent_inline_ref must pass the modified ptr parameter to get the |
a542ad1b JS |
2276 | * next ref. after the last ref was processed, 1 is returned. |
2277 | * returns <0 on error | |
2278 | */ | |
e0c476b1 JM |
2279 | static int get_extent_inline_ref(unsigned long *ptr, |
2280 | const struct extent_buffer *eb, | |
2281 | const struct btrfs_key *key, | |
2282 | const struct btrfs_extent_item *ei, | |
2283 | u32 item_size, | |
2284 | struct btrfs_extent_inline_ref **out_eiref, | |
2285 | int *out_type) | |
a542ad1b JS |
2286 | { |
2287 | unsigned long end; | |
2288 | u64 flags; | |
2289 | struct btrfs_tree_block_info *info; | |
2290 | ||
2291 | if (!*ptr) { | |
2292 | /* first call */ | |
2293 | flags = btrfs_extent_flags(eb, ei); | |
2294 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | |
6eda71d0 LB |
2295 | if (key->type == BTRFS_METADATA_ITEM_KEY) { |
2296 | /* a skinny metadata extent */ | |
2297 | *out_eiref = | |
2298 | (struct btrfs_extent_inline_ref *)(ei + 1); | |
2299 | } else { | |
2300 | WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); | |
2301 | info = (struct btrfs_tree_block_info *)(ei + 1); | |
2302 | *out_eiref = | |
2303 | (struct btrfs_extent_inline_ref *)(info + 1); | |
2304 | } | |
a542ad1b JS |
2305 | } else { |
2306 | *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1); | |
2307 | } | |
2308 | *ptr = (unsigned long)*out_eiref; | |
cd857dd6 | 2309 | if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size) |
a542ad1b JS |
2310 | return -ENOENT; |
2311 | } | |
2312 | ||
2313 | end = (unsigned long)ei + item_size; | |
6eda71d0 | 2314 | *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr); |
3de28d57 LB |
2315 | *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref, |
2316 | BTRFS_REF_TYPE_ANY); | |
2317 | if (*out_type == BTRFS_REF_TYPE_INVALID) | |
af431dcb | 2318 | return -EUCLEAN; |
a542ad1b JS |
2319 | |
2320 | *ptr += btrfs_extent_inline_ref_size(*out_type); | |
2321 | WARN_ON(*ptr > end); | |
2322 | if (*ptr == end) | |
2323 | return 1; /* last */ | |
2324 | ||
2325 | return 0; | |
2326 | } | |
2327 | ||
2328 | /* | |
2329 | * reads the tree block backref for an extent. tree level and root are returned | |
2330 | * through out_level and out_root. ptr must point to a 0 value for the first | |
e0c476b1 | 2331 | * call and may be modified (see get_extent_inline_ref comment). |
a542ad1b JS |
2332 | * returns 0 if data was provided, 1 if there was no more data to provide or |
2333 | * <0 on error. | |
2334 | */ | |
2335 | int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, | |
6eda71d0 LB |
2336 | struct btrfs_key *key, struct btrfs_extent_item *ei, |
2337 | u32 item_size, u64 *out_root, u8 *out_level) | |
a542ad1b JS |
2338 | { |
2339 | int ret; | |
2340 | int type; | |
a542ad1b JS |
2341 | struct btrfs_extent_inline_ref *eiref; |
2342 | ||
2343 | if (*ptr == (unsigned long)-1) | |
2344 | return 1; | |
2345 | ||
2346 | while (1) { | |
e0c476b1 | 2347 | ret = get_extent_inline_ref(ptr, eb, key, ei, item_size, |
6eda71d0 | 2348 | &eiref, &type); |
a542ad1b JS |
2349 | if (ret < 0) |
2350 | return ret; | |
2351 | ||
2352 | if (type == BTRFS_TREE_BLOCK_REF_KEY || | |
2353 | type == BTRFS_SHARED_BLOCK_REF_KEY) | |
2354 | break; | |
2355 | ||
2356 | if (ret == 1) | |
2357 | return 1; | |
2358 | } | |
2359 | ||
2360 | /* we can treat both ref types equally here */ | |
a542ad1b | 2361 | *out_root = btrfs_extent_inline_ref_offset(eb, eiref); |
a1317f45 FM |
2362 | |
2363 | if (key->type == BTRFS_EXTENT_ITEM_KEY) { | |
2364 | struct btrfs_tree_block_info *info; | |
2365 | ||
2366 | info = (struct btrfs_tree_block_info *)(ei + 1); | |
2367 | *out_level = btrfs_tree_block_level(eb, info); | |
2368 | } else { | |
2369 | ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); | |
2370 | *out_level = (u8)key->offset; | |
2371 | } | |
a542ad1b JS |
2372 | |
2373 | if (ret == 1) | |
2374 | *ptr = (unsigned long)-1; | |
2375 | ||
2376 | return 0; | |
2377 | } | |
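
/*
 * Usage sketch (hypothetical): draining all tree block backrefs of a
 * metadata extent. @ptr starts at 0 and is advanced internally; a return
 * value of 1 means the previous call already delivered the last ref.
 */
static int example_walk_tree_refs(struct btrfs_fs_info *fs_info,
				  struct extent_buffer *eb,
				  struct btrfs_key *key,
				  struct btrfs_extent_item *ei, u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	while (true) {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret == 1)
			break;
		btrfs_debug(fs_info, "tree ref from root %llu at level %u",
			    root, level);
	}
	return 0;
}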
2378 | ||
ab8d0fc4 JM |
2379 | static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, |
2380 | struct extent_inode_elem *inode_list, | |
2381 | u64 root, u64 extent_item_objectid, | |
2382 | iterate_extent_inodes_t *iterate, void *ctx) | |
a542ad1b | 2383 | { |
976b1908 | 2384 | struct extent_inode_elem *eie; |
4692cf58 | 2385 | int ret = 0; |
4692cf58 | 2386 | |
976b1908 | 2387 | for (eie = inode_list; eie; eie = eie->next) { |
ab8d0fc4 JM |
2388 | btrfs_debug(fs_info, |
2389 | "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu", | |
2390 | extent_item_objectid, eie->inum, | |
2391 | eie->offset, root); | |
c7499a64 | 2392 | ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx); |
4692cf58 | 2393 | if (ret) { |
ab8d0fc4 JM |
2394 | btrfs_debug(fs_info, |
2395 | "stopping iteration for %llu due to ret=%d", | |
2396 | extent_item_objectid, ret); | |
4692cf58 JS |
2397 | break; |
2398 | } | |
a542ad1b JS |
2399 | } |
2400 | ||
a542ad1b JS |
2401 | return ret; |
2402 | } | |
2403 | ||
2404 | /* | |
2405 | * calls iterate() for every inode that references the extent identified by | |
4692cf58 | 2406 | * the given parameters. |
a542ad1b JS |
2407 | * when the iterator function returns a non-zero value, iteration stops. |
2408 | */ | |
a2c8d27e FM |
2409 | int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx, |
2410 | bool search_commit_root, | |
2411 | iterate_extent_inodes_t *iterate, void *user_ctx) | |
a542ad1b | 2412 | { |
a542ad1b | 2413 | int ret; |
a2c8d27e FM |
2414 | struct ulist *refs; |
2415 | struct ulist_node *ref_node; | |
f3a84ccd | 2416 | struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem); |
cd1b413c | 2417 | struct ulist_iterator ref_uiter; |
a542ad1b | 2418 | |
a2c8d27e FM |
2419 | btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu", |
2420 | ctx->bytenr); | |
2421 | ||
2422 | ASSERT(ctx->trans == NULL); | |
1baea6f1 FM |
2423 | ASSERT(ctx->roots == NULL); |
2424 | ||
da61d31a | 2425 | if (!search_commit_root) { |
a2c8d27e FM |
2426 | struct btrfs_trans_handle *trans; |
2427 | ||
2428 | trans = btrfs_attach_transaction(ctx->fs_info->tree_root); | |
bfc61c36 FM |
2429 | if (IS_ERR(trans)) { |
2430 | if (PTR_ERR(trans) != -ENOENT && | |
66d04209 | 2431 | PTR_ERR(trans) != -EROFS) |
bfc61c36 FM |
2432 | return PTR_ERR(trans); |
2433 | trans = NULL; | |
2434 | } | |
a2c8d27e | 2435 | ctx->trans = trans; |
bfc61c36 FM |
2436 | } |
2437 | ||
a2c8d27e FM |
2438 | if (ctx->trans) { |
2439 | btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem); | |
2440 | ctx->time_seq = seq_elem.seq; | |
2441 | } else { | |
2442 | down_read(&ctx->fs_info->commit_root_sem); | |
2443 | } | |
a542ad1b | 2444 | |
a2c8d27e | 2445 | ret = btrfs_find_all_leafs(ctx); |
4692cf58 JS |
2446 | if (ret) |
2447 | goto out; | |
a2c8d27e FM |
2448 | refs = ctx->refs; |
2449 | ctx->refs = NULL; | |
a542ad1b | 2450 | |
cd1b413c JS |
2451 | ULIST_ITER_INIT(&ref_uiter); |
2452 | while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) { | |
66d04209 | 2453 | const u64 leaf_bytenr = ref_node->val; |
a2c8d27e FM |
2454 | struct ulist_node *root_node; |
2455 | struct ulist_iterator root_uiter; | |
66d04209 FM |
2456 | struct extent_inode_elem *inode_list; |
2457 | ||
2458 | inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux; | |
2459 | ||
2460 | if (ctx->cache_lookup) { | |
2461 | const u64 *root_ids; | |
2462 | int root_count; | |
2463 | bool cached; | |
2464 | ||
2465 | cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx, | |
2466 | &root_ids, &root_count); | |
2467 | if (cached) { | |
2468 | for (int i = 0; i < root_count; i++) { | |
2469 | ret = iterate_leaf_refs(ctx->fs_info, | |
2470 | inode_list, | |
2471 | root_ids[i], | |
2472 | leaf_bytenr, | |
2473 | iterate, | |
2474 | user_ctx); | |
2475 | if (ret) | |
2476 | break; | |
2477 | } | |
2478 | continue; | |
2479 | } | |
2480 | } | |
2481 | ||
2482 | if (!ctx->roots) { | |
2483 | ctx->roots = ulist_alloc(GFP_NOFS); | |
2484 | if (!ctx->roots) { | |
2485 | ret = -ENOMEM; | |
2486 | break; | |
2487 | } | |
2488 | } | |
a2c8d27e | 2489 | |
66d04209 | 2490 | ctx->bytenr = leaf_bytenr; |
a2c8d27e | 2491 | ret = btrfs_find_all_roots_safe(ctx); |
4692cf58 JS |
2492 | if (ret) |
2493 | break; | |
a2c8d27e | 2494 | |
66d04209 FM |
2495 | if (ctx->cache_store) |
2496 | ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx); | |
2497 | ||
cd1b413c | 2498 | ULIST_ITER_INIT(&root_uiter); |
a2c8d27e FM |
2499 | while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) { |
2500 | btrfs_debug(ctx->fs_info, | |
ab8d0fc4 JM |
2501 | "root %llu references leaf %llu, data list %#llx", |
2502 | root_node->val, ref_node->val, | |
2503 | ref_node->aux); | |
66d04209 | 2504 | ret = iterate_leaf_refs(ctx->fs_info, inode_list, |
a2c8d27e FM |
2505 | root_node->val, ctx->bytenr, |
2506 | iterate, user_ctx); | |
4692cf58 | 2507 | } |
1baea6f1 | 2508 | ulist_reinit(ctx->roots); |
a542ad1b JS |
2509 | } |
2510 | ||
976b1908 | 2511 | free_leaf_list(refs); |
4692cf58 | 2512 | out: |
a2c8d27e FM |
2513 | if (ctx->trans) { |
2514 | btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem); | |
2515 | btrfs_end_transaction(ctx->trans); | |
2516 | ctx->trans = NULL; | |
9e351cc8 | 2517 | } else { |
a2c8d27e | 2518 | up_read(&ctx->fs_info->commit_root_sem); |
7a3ae2f8 JS |
2519 | } |
2520 | ||
1baea6f1 FM |
2521 | ulist_free(ctx->roots); |
2522 | ctx->roots = NULL; | |
2523 | ||
88ffb665 FM |
2524 | if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP) |
2525 | ret = 0; | |
2526 | ||
a542ad1b JS |
2527 | return ret; |
2528 | } | |
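
/*
 * Sketch of a caller-supplied iterate_extent_inodes_t callback
 * (hypothetical). Returning a non-zero value stops the iteration;
 * BTRFS_ITERATE_EXTENT_INODES_STOP is translated back to 0 by
 * iterate_extent_inodes(), while build_ino_list() below simply collects
 * every triple.
 */
static int example_inode_cb(u64 inum, u64 offset, u64 num_bytes, u64 root,
			    void *user_ctx)
{
	pr_debug("extent referenced by inode %llu at offset %llu (%llu bytes), root %llu\n",
		 inum, offset, num_bytes, root);
	return 0;
}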
2529 | ||
c7499a64 | 2530 | static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx) |
e3059ec0 DS |
2531 | { |
2532 | struct btrfs_data_container *inodes = ctx; | |
2533 | const size_t c = 3 * sizeof(u64); | |
2534 | ||
2535 | if (inodes->bytes_left >= c) { | |
2536 | inodes->bytes_left -= c; | |
2537 | inodes->val[inodes->elem_cnt] = inum; | |
2538 | inodes->val[inodes->elem_cnt + 1] = offset; | |
2539 | inodes->val[inodes->elem_cnt + 2] = root; | |
2540 | inodes->elem_cnt += 3; | |
2541 | } else { | |
2542 | inodes->bytes_missing += c - inodes->bytes_left; | |
2543 | inodes->bytes_left = 0; | |
2544 | inodes->elem_missed += 3; | |
2545 | } | |
2546 | ||
2547 | return 0; | |
2548 | } | |
2549 | ||
a542ad1b JS |
2550 | int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, |
2551 | struct btrfs_path *path, | |
e3059ec0 | 2552 | void *ctx, bool ignore_offset) |
a542ad1b | 2553 | { |
a2c8d27e | 2554 | struct btrfs_backref_walk_ctx walk_ctx = { 0 }; |
a542ad1b | 2555 | int ret; |
69917e43 | 2556 | u64 flags = 0; |
a542ad1b | 2557 | struct btrfs_key found_key; |
7a3ae2f8 | 2558 | int search_commit_root = path->search_commit_root; |
a542ad1b | 2559 | |
69917e43 | 2560 | ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); |
4692cf58 | 2561 | btrfs_release_path(path); |
a542ad1b JS |
2562 | if (ret < 0) |
2563 | return ret; | |
69917e43 | 2564 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) |
3627bf45 | 2565 | return -EINVAL; |
a542ad1b | 2566 | |
a2c8d27e | 2567 | walk_ctx.bytenr = found_key.objectid; |
6ce6ba53 | 2568 | if (ignore_offset) |
a2c8d27e | 2569 | walk_ctx.ignore_extent_item_pos = true; |
6ce6ba53 | 2570 | else |
a2c8d27e FM |
2571 | walk_ctx.extent_item_pos = logical - found_key.objectid; |
2572 | walk_ctx.fs_info = fs_info; | |
6ce6ba53 | 2573 | |
a2c8d27e FM |
2574 | return iterate_extent_inodes(&walk_ctx, search_commit_root, |
2575 | build_ino_list, ctx); | |
a542ad1b JS |
2576 | } |
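
/*
 * Usage sketch (hypothetical, mirrors the LOGICAL_INO ioctl): resolve all
 * (inum, offset, root) triples for a logical address into a data container
 * and walk the packed u64 triples afterwards.
 */
static int example_logical_to_inodes(struct btrfs_fs_info *fs_info,
				     u64 logical)
{
	struct btrfs_data_container *inodes;
	struct btrfs_path *path;
	int ret;

	inodes = init_data_container(64 * 1024);
	if (IS_ERR(inodes))
		return PTR_ERR(inodes);

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(inodes);
		return -ENOMEM;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path, inodes,
					  false);
	for (u32 i = 0; !ret && i < inodes->elem_cnt; i += 3)
		btrfs_debug(fs_info, "inum %llu offset %llu root %llu",
			    inodes->val[i], inodes->val[i + 1],
			    inodes->val[i + 2]);

	btrfs_free_path(path);
	kvfree(inodes);
	return ret;
}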
2577 | ||
ad6240f6 | 2578 | static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, |
875d1daa | 2579 | struct extent_buffer *eb, struct inode_fs_paths *ipath); |
d24bec3a | 2580 | |
875d1daa | 2581 | static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath) |
a542ad1b | 2582 | { |
aefc1eb1 | 2583 | int ret = 0; |
a542ad1b JS |
2584 | int slot; |
2585 | u32 cur; | |
2586 | u32 len; | |
2587 | u32 name_len; | |
2588 | u64 parent = 0; | |
2589 | int found = 0; | |
875d1daa DS |
2590 | struct btrfs_root *fs_root = ipath->fs_root; |
2591 | struct btrfs_path *path = ipath->btrfs_path; | |
a542ad1b | 2592 | struct extent_buffer *eb; |
a542ad1b JS |
2593 | struct btrfs_inode_ref *iref; |
2594 | struct btrfs_key found_key; | |
2595 | ||
aefc1eb1 | 2596 | while (!ret) { |
c234a24d DS |
2597 | ret = btrfs_find_item(fs_root, path, inum, |
2598 | parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY, | |
2599 | &found_key); | |
2600 | ||
a542ad1b JS |
2601 | if (ret < 0) |
2602 | break; | |
2603 | if (ret) { | |
2604 | ret = found ? 0 : -ENOENT; | |
2605 | break; | |
2606 | } | |
2607 | ++found; | |
2608 | ||
2609 | parent = found_key.offset; | |
2610 | slot = path->slots[0]; | |
3fe81ce2 FDBM |
2611 | eb = btrfs_clone_extent_buffer(path->nodes[0]); |
2612 | if (!eb) { | |
2613 | ret = -ENOMEM; | |
2614 | break; | |
2615 | } | |
a542ad1b JS |
2616 | btrfs_release_path(path); |
2617 | ||
a542ad1b JS |
2618 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); |
2619 | ||
3212fa14 | 2620 | for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) { |
a542ad1b JS |
2621 | name_len = btrfs_inode_ref_name_len(eb, iref); |
2622 | /* path must be released before calling iterate()! */ | |
ab8d0fc4 JM |
2623 | btrfs_debug(fs_root->fs_info, |
2624 | "following ref at offset %u for inode %llu in tree %llu", | |
4fd786e6 | 2625 | cur, found_key.objectid, |
e094f480 | 2626 | btrfs_root_id(fs_root)); |
ad6240f6 | 2627 | ret = inode_to_path(parent, name_len, |
875d1daa | 2628 | (unsigned long)(iref + 1), eb, ipath); |
aefc1eb1 | 2629 | if (ret) |
a542ad1b | 2630 | break; |
a542ad1b JS |
2631 | len = sizeof(*iref) + name_len; |
2632 | iref = (struct btrfs_inode_ref *)((char *)iref + len); | |
2633 | } | |
2634 | free_extent_buffer(eb); | |
2635 | } | |
2636 | ||
2637 | btrfs_release_path(path); | |
2638 | ||
2639 | return ret; | |
2640 | } | |
2641 | ||
875d1daa | 2642 | static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath) |
d24bec3a MF |
2643 | { |
2644 | int ret; | |
2645 | int slot; | |
2646 | u64 offset = 0; | |
2647 | u64 parent; | |
2648 | int found = 0; | |
875d1daa DS |
2649 | struct btrfs_root *fs_root = ipath->fs_root; |
2650 | struct btrfs_path *path = ipath->btrfs_path; | |
d24bec3a MF |
2651 | struct extent_buffer *eb; |
2652 | struct btrfs_inode_extref *extref; | |
d24bec3a MF |
2653 | u32 item_size; |
2654 | u32 cur_offset; | |
2655 | unsigned long ptr; | |
2656 | ||
2657 | while (1) { | |
2658 | ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, | |
2659 | &offset); | |
2660 | if (ret < 0) | |
2661 | break; | |
2662 | if (ret) { | |
2663 | ret = found ? 0 : -ENOENT; | |
2664 | break; | |
2665 | } | |
2666 | ++found; | |
2667 | ||
2668 | slot = path->slots[0]; | |
3fe81ce2 FDBM |
2669 | eb = btrfs_clone_extent_buffer(path->nodes[0]); |
2670 | if (!eb) { | |
2671 | ret = -ENOMEM; | |
2672 | break; | |
2673 | } | |
d24bec3a MF |
2674 | btrfs_release_path(path); |
2675 | ||
3212fa14 | 2676 | item_size = btrfs_item_size(eb, slot); |
2849a854 | 2677 | ptr = btrfs_item_ptr_offset(eb, slot); |
d24bec3a MF |
2678 | cur_offset = 0; |
2679 | ||
2680 | while (cur_offset < item_size) { | |
2681 | u32 name_len; | |
2682 | ||
2683 | extref = (struct btrfs_inode_extref *)(ptr + cur_offset); | |
2684 | parent = btrfs_inode_extref_parent(eb, extref); | |
2685 | name_len = btrfs_inode_extref_name_len(eb, extref); | |
ad6240f6 | 2686 | ret = inode_to_path(parent, name_len, |
875d1daa | 2687 | (unsigned long)&extref->name, eb, ipath); |
d24bec3a MF |
2688 | if (ret) |
2689 | break; | |
2690 | ||
2849a854 | 2691 | cur_offset += btrfs_inode_extref_name_len(eb, extref); |
d24bec3a MF |
2692 | cur_offset += sizeof(*extref); |
2693 | } | |
d24bec3a MF |
2694 | free_extent_buffer(eb); |
2695 | ||
2696 | offset++; | |
2697 | } | |
2698 | ||
2699 | btrfs_release_path(path); | |
2700 | ||
2701 | return ret; | |
2702 | } | |
2703 | ||
a542ad1b JS |
2704 | /* |
2705 | * returns 0 if the path could be dumped (possibly truncated) |
2706 | * returns <0 in case of an error | |
2707 | */ | |
d24bec3a | 2708 | static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, |
875d1daa | 2709 | struct extent_buffer *eb, struct inode_fs_paths *ipath) |
a542ad1b | 2710 | { |
a542ad1b JS |
2711 | char *fspath; |
2712 | char *fspath_min; | |
2713 | int i = ipath->fspath->elem_cnt; | |
2714 | const int s_ptr = sizeof(char *); | |
2715 | u32 bytes_left; | |
2716 | ||
2717 | bytes_left = ipath->fspath->bytes_left > s_ptr ? | |
2718 | ipath->fspath->bytes_left - s_ptr : 0; | |
2719 | ||
740c3d22 | 2720 | fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; |
96b5bd77 JS |
2721 | fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, |
2722 | name_off, eb, inum, fspath_min, bytes_left); | |
a542ad1b JS |
2723 | if (IS_ERR(fspath)) |
2724 | return PTR_ERR(fspath); | |
2725 | ||
2726 | if (fspath > fspath_min) { | |
745c4d8e | 2727 | ipath->fspath->val[i] = (u64)(unsigned long)fspath; |
a542ad1b JS |
2728 | ++ipath->fspath->elem_cnt; |
2729 | ipath->fspath->bytes_left = fspath - fspath_min; | |
2730 | } else { | |
2731 | ++ipath->fspath->elem_missed; | |
2732 | ipath->fspath->bytes_missing += fspath_min - fspath; | |
2733 | ipath->fspath->bytes_left = 0; | |
2734 | } | |
2735 | ||
2736 | return 0; | |
2737 | } | |
2738 | ||
2739 | /* | |
2740 | * this dumps all file system paths to the inode into the ipath struct, provided | |
2741 | * it has been created large enough. each path is zero-terminated and accessed |
740c3d22 | 2742 | * from ipath->fspath->val[i]. |
a542ad1b | 2743 | * when it returns, there are ipath->fspath->elem_cnt number of paths available |
740c3d22 | 2744 | * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the |
01327610 | 2745 | * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise, |
a542ad1b JS |
2746 | * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would |
2747 | * have been needed to return all paths. | |
2748 | */ | |
2749 | int paths_from_inode(u64 inum, struct inode_fs_paths *ipath) | |
2750 | { | |
ad6240f6 DS |
2751 | int ret; |
2752 | int found_refs = 0; | |
2753 | ||
875d1daa | 2754 | ret = iterate_inode_refs(inum, ipath); |
ad6240f6 DS |
2755 | if (!ret) |
2756 | ++found_refs; | |
2757 | else if (ret != -ENOENT) | |
2758 | return ret; | |
2759 | ||
875d1daa | 2760 | ret = iterate_inode_extrefs(inum, ipath); |
ad6240f6 DS |
2761 | if (ret == -ENOENT && found_refs) |
2762 | return 0; | |
2763 | ||
2764 | return ret; | |
a542ad1b JS |
2765 | } |
2766 | ||
a542ad1b JS |
2767 | struct btrfs_data_container *init_data_container(u32 total_bytes) |
2768 | { | |
2769 | struct btrfs_data_container *data; | |
2770 | size_t alloc_bytes; | |
2771 | ||
2772 | alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); | |
2f7ef5bb | 2773 | data = kvzalloc(alloc_bytes, GFP_KERNEL); |
a542ad1b JS |
2774 | if (!data) |
2775 | return ERR_PTR(-ENOMEM); | |
2776 | ||
2f7ef5bb | 2777 | if (total_bytes >= sizeof(*data)) |
a542ad1b | 2778 | data->bytes_left = total_bytes - sizeof(*data); |
2f7ef5bb | 2779 | else |
a542ad1b | 2780 | data->bytes_missing = sizeof(*data) - total_bytes; |
a542ad1b JS |
2781 | |
2782 | return data; | |
2783 | } | |
2784 | ||
2785 | /* | |
2786 | * allocates space to return multiple file system paths for an inode. | |
2787 | * total_bytes to allocate are passed, note that space usable for actual path | |
2788 | * information will be total_bytes - sizeof(struct btrfs_data_container). |
2789 | * the returned pointer must be freed with free_ipath() in the end. | |
2790 | */ | |
2791 | struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, | |
2792 | struct btrfs_path *path) | |
2793 | { | |
2794 | struct inode_fs_paths *ifp; | |
2795 | struct btrfs_data_container *fspath; | |
2796 | ||
2797 | fspath = init_data_container(total_bytes); | |
2798 | if (IS_ERR(fspath)) | |
afc6961f | 2799 | return ERR_CAST(fspath); |
a542ad1b | 2800 | |
f54de068 | 2801 | ifp = kmalloc(sizeof(*ifp), GFP_KERNEL); |
a542ad1b | 2802 | if (!ifp) { |
f54de068 | 2803 | kvfree(fspath); |
a542ad1b JS |
2804 | return ERR_PTR(-ENOMEM); |
2805 | } | |
2806 | ||
2807 | ifp->btrfs_path = path; | |
2808 | ifp->fspath = fspath; | |
2809 | ifp->fs_root = fs_root; | |
2810 | ||
2811 | return ifp; | |
2812 | } | |
2813 | ||
2814 | void free_ipath(struct inode_fs_paths *ipath) | |
2815 | { | |
4735fb28 JJ |
2816 | if (!ipath) |
2817 | return; | |
f54de068 | 2818 | kvfree(ipath->fspath); |
a542ad1b JS |
2819 | kfree(ipath); |
2820 | } | |
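
/*
 * Usage sketch (hypothetical, mirrors the INO_PATHS ioctl): collect every
 * filesystem path of an inode and read the zero-terminated strings from
 * ipath->fspath->val[]. free_ipath() releases the container but not the
 * btrfs path passed to init_ipath().
 */
static int example_dump_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct inode_fs_paths *ipath;
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	for (u32 i = 0; !ret && i < ipath->fspath->elem_cnt; i++)
		btrfs_debug(fs_root->fs_info, "path: %s",
			    (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}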
a37f232b | 2821 | |
d68194b2 | 2822 | struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info) |
a37f232b QW |
2823 | { |
2824 | struct btrfs_backref_iter *ret; | |
2825 | ||
d68194b2 | 2826 | ret = kzalloc(sizeof(*ret), GFP_NOFS); |
a37f232b QW |
2827 | if (!ret) |
2828 | return NULL; | |
2829 | ||
2830 | ret->path = btrfs_alloc_path(); | |
c15c2ec0 | 2831 | if (!ret->path) { |
a37f232b QW |
2832 | kfree(ret); |
2833 | return NULL; | |
2834 | } | |
2835 | ||
2836 | /* The current backref iterator only supports iteration in the commit root */ |
2837 | ret->path->search_commit_root = 1; | |
2838 | ret->path->skip_locking = 1; | |
2839 | ret->fs_info = fs_info; | |
2840 | ||
2841 | return ret; | |
2842 | } | |
2843 | ||
2aa756ec DS |
2844 | static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter) |
2845 | { | |
2846 | iter->bytenr = 0; | |
2847 | iter->item_ptr = 0; | |
2848 | iter->cur_ptr = 0; | |
2849 | iter->end_ptr = 0; | |
2850 | btrfs_release_path(iter->path); | |
2851 | memset(&iter->cur_key, 0, sizeof(iter->cur_key)); | |
2852 | } | |
2853 | ||
a37f232b QW |
2854 | int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr) |
2855 | { | |
2856 | struct btrfs_fs_info *fs_info = iter->fs_info; | |
29cbcf40 | 2857 | struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); |
a37f232b QW |
2858 | struct btrfs_path *path = iter->path; |
2859 | struct btrfs_extent_item *ei; | |
2860 | struct btrfs_key key; | |
2861 | int ret; | |
2862 | ||
2863 | key.objectid = bytenr; | |
2864 | key.type = BTRFS_METADATA_ITEM_KEY; | |
2865 | key.offset = (u64)-1; | |
2866 | iter->bytenr = bytenr; | |
2867 | ||
29cbcf40 | 2868 | ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); |
a37f232b QW |
2869 | if (ret < 0) |
2870 | return ret; | |
2871 | if (ret == 0) { | |
11dcc86e DS |
2872 | /* |
2873 | * A key with offset -1 was found: an extent item with such an offset |
2874 | * would have to exist, but that is outside the valid range. |
2875 | */ | |
a37f232b QW |
2876 | ret = -EUCLEAN; |
2877 | goto release; | |
2878 | } | |
2879 | if (path->slots[0] == 0) { | |
ed50ab0f | 2880 | DEBUG_WARN(); |
a37f232b QW |
2881 | ret = -EUCLEAN; |
2882 | goto release; | |
2883 | } | |
2884 | path->slots[0]--; | |
2885 | ||
2886 | btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); | |
2887 | if ((key.type != BTRFS_EXTENT_ITEM_KEY && | |
2888 | key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) { | |
2889 | ret = -ENOENT; | |
2890 | goto release; | |
2891 | } | |
2892 | memcpy(&iter->cur_key, &key, sizeof(key)); | |
2893 | iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], | |
2894 | path->slots[0]); | |
2895 | iter->end_ptr = (u32)(iter->item_ptr + | |
3212fa14 | 2896 | btrfs_item_size(path->nodes[0], path->slots[0])); |
a37f232b QW |
2897 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], |
2898 | struct btrfs_extent_item); | |
2899 | ||
2900 | /* | |
2901 | * Only iteration of tree backrefs is supported for now. |
2902 | * |
2903 | * This is an extra precaution for non-skinny-metadata, where |
2904 | * EXTENT_ITEM is also used for tree blocks, so we can only use the |
2905 | * extent flags to determine if it's a tree block. |
2906 | */ | |
2907 | if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { | |
2908 | ret = -ENOTSUPP; | |
2909 | goto release; | |
2910 | } | |
2911 | iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); | |
2912 | ||
2913 | /* If there is no inline backref, go search for keyed backref */ | |
2914 | if (iter->cur_ptr >= iter->end_ptr) { | |
29cbcf40 | 2915 | ret = btrfs_next_item(extent_root, path); |
a37f232b QW |
2916 | |
2917 | /* Neither inline nor keyed ref */ |
2918 | if (ret > 0) { | |
2919 | ret = -ENOENT; | |
2920 | goto release; | |
2921 | } | |
2922 | if (ret < 0) | |
2923 | goto release; | |
2924 | ||
2925 | btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, | |
2926 | path->slots[0]); | |
2927 | if (iter->cur_key.objectid != bytenr || | |
2928 | (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && | |
2929 | iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { | |
2930 | ret = -ENOENT; | |
2931 | goto release; | |
2932 | } | |
2933 | iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], | |
2934 | path->slots[0]); | |
2935 | iter->item_ptr = iter->cur_ptr; | |
3212fa14 | 2936 | iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size( |
a37f232b QW |
2937 | path->nodes[0], path->slots[0])); |
2938 | } | |
2939 | ||
2940 | return 0; | |
2941 | release: | |
2942 | btrfs_backref_iter_release(iter); | |
2943 | return ret; | |
2944 | } | |
c39c2ddc | 2945 | |
2aa756ec DS |
2946 | static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter) |
2947 | { | |
2948 | if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY || | |
2949 | iter->cur_key.type == BTRFS_METADATA_ITEM_KEY) | |
2950 | return true; | |
2951 | return false; | |
2952 | } | |
2953 | ||
c39c2ddc QW |
2954 | /* |
2955 | * Go to the next backref item of the current bytenr, which can be either |
2956 | * inlined or keyed. |
2957 | * |
2958 | * The caller needs to check whether it's an inline ref or not via iter->cur_key. |
2959 | * |
2960 | * Return 0 if we got the next backref without problem. |
2961 | * Return >0 if there is no extra backref for this bytenr. |
2962 | * Return <0 if something went wrong. |
2963 | */ | |
2964 | int btrfs_backref_iter_next(struct btrfs_backref_iter *iter) | |
2965 | { | |
ef923440 | 2966 | struct extent_buffer *eb = iter->path->nodes[0]; |
29cbcf40 | 2967 | struct btrfs_root *extent_root; |
c39c2ddc QW |
2968 | struct btrfs_path *path = iter->path; |
2969 | struct btrfs_extent_inline_ref *iref; | |
2970 | int ret; | |
2971 | u32 size; | |
2972 | ||
2973 | if (btrfs_backref_iter_is_inline_ref(iter)) { | |
2974 | /* We're still inside the inline refs */ | |
2975 | ASSERT(iter->cur_ptr < iter->end_ptr); | |
2976 | ||
2977 | if (btrfs_backref_has_tree_block_info(iter)) { | |
2978 | /* First tree block info */ | |
2979 | size = sizeof(struct btrfs_tree_block_info); | |
2980 | } else { | |
2981 | /* Use inline ref type to determine the size */ | |
2982 | int type; | |
2983 | ||
2984 | iref = (struct btrfs_extent_inline_ref *) | |
2985 | ((unsigned long)iter->cur_ptr); | |
2986 | type = btrfs_extent_inline_ref_type(eb, iref); | |
2987 | ||
2988 | size = btrfs_extent_inline_ref_size(type); | |
2989 | } | |
2990 | iter->cur_ptr += size; | |
2991 | if (iter->cur_ptr < iter->end_ptr) | |
2992 | return 0; | |
2993 | ||
2994 | /* All inline items iterated, fall through */ | |
2995 | } | |
2996 | ||
2997 | /* We're at keyed items, or there are no inline items left; go to the next item */ |
29cbcf40 JB |
2998 | extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr); |
2999 | ret = btrfs_next_item(extent_root, iter->path); | |
c39c2ddc QW |
3000 | if (ret) |
3001 | return ret; | |
3002 | ||
3003 | btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); | |
3004 | if (iter->cur_key.objectid != iter->bytenr || | |
3005 | (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && | |
3006 | iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) | |
3007 | return 1; | |
3008 | iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], | |
3009 | path->slots[0]); | |
3010 | iter->cur_ptr = iter->item_ptr; | |
3212fa14 | 3011 | iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0], |
c39c2ddc QW |
3012 | path->slots[0]); |
3013 | return 0; | |
3014 | } | |
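/*
 * Illustrative sketch, not part of this file: the basic loop to walk all
 * tree backrefs of a bytenr with the iterator above. The function name is
 * made up and the per-ref handling is left as comments.
 */
static int example_walk_tree_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	for (ret = btrfs_backref_iter_start(iter, bytenr);
	     ret == 0; ret = btrfs_backref_iter_next(iter)) {
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			/* Inline ref, parse it through iter->cur_ptr. */
		} else {
			/* Keyed ref, iter->cur_key holds the whole ref key. */
		}
	}
	/* ret > 0 only means we ran out of backrefs for this bytenr. */
	if (ret > 0)
		ret = 0;
	btrfs_backref_iter_release(iter);
	btrfs_free_path(iter->path);
	kfree(iter);
	return ret;
}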
584fb121 QW |
3015 | |
3016 | void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info, | |
c71d3c69 | 3017 | struct btrfs_backref_cache *cache, bool is_reloc) |
584fb121 QW |
3018 | { |
3019 | int i; | |
3020 | ||
3021 | cache->rb_root = RB_ROOT; | |
3022 | for (i = 0; i < BTRFS_MAX_LEVEL; i++) | |
3023 | INIT_LIST_HEAD(&cache->pending[i]); | |
584fb121 QW |
3024 | INIT_LIST_HEAD(&cache->pending_edge); |
3025 | INIT_LIST_HEAD(&cache->useless_node); | |
3026 | cache->fs_info = fs_info; | |
3027 | cache->is_reloc = is_reloc; | |
3028 | } | |
b1818dab QW |
3029 | |
3030 | struct btrfs_backref_node *btrfs_backref_alloc_node( | |
3031 | struct btrfs_backref_cache *cache, u64 bytenr, int level) | |
3032 | { | |
3033 | struct btrfs_backref_node *node; | |
3034 | ||
3035 | ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL); | |
3036 | node = kzalloc(sizeof(*node), GFP_NOFS); | |
3037 | if (!node) | |
3038 | return node; | |
3039 | ||
3040 | INIT_LIST_HEAD(&node->list); | |
3041 | INIT_LIST_HEAD(&node->upper); | |
3042 | INIT_LIST_HEAD(&node->lower); | |
3043 | RB_CLEAR_NODE(&node->rb_node); | |
3044 | cache->nr_nodes++; | |
3045 | node->level = level; | |
3046 | node->bytenr = bytenr; | |
3047 | ||
3048 | return node; | |
3049 | } | |
47254d07 | 3050 | |
2aa756ec DS |
3051 | void btrfs_backref_free_node(struct btrfs_backref_cache *cache, |
3052 | struct btrfs_backref_node *node) | |
3053 | { | |
3054 | if (node) { | |
3055 | ASSERT(list_empty(&node->list)); | |
3056 | ASSERT(list_empty(&node->lower)); | |
3057 | ASSERT(node->eb == NULL); | |
3058 | cache->nr_nodes--; | |
3059 | btrfs_put_root(node->root); | |
3060 | kfree(node); | |
3061 | } | |
3062 | } | |
3063 | ||
47254d07 QW |
3064 | struct btrfs_backref_edge *btrfs_backref_alloc_edge( |
3065 | struct btrfs_backref_cache *cache) | |
3066 | { | |
3067 | struct btrfs_backref_edge *edge; | |
3068 | ||
3069 | edge = kzalloc(sizeof(*edge), GFP_NOFS); | |
3070 | if (edge) | |
3071 | cache->nr_edges++; | |
3072 | return edge; | |
3073 | } | |
023acb07 | 3074 | |
2aa756ec DS |
3075 | void btrfs_backref_free_edge(struct btrfs_backref_cache *cache, |
3076 | struct btrfs_backref_edge *edge) | |
3077 | { | |
3078 | if (edge) { | |
3079 | cache->nr_edges--; | |
3080 | kfree(edge); | |
3081 | } | |
3082 | } | |
3083 | ||
3084 | void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node) | |
3085 | { | |
3086 | if (node->locked) { | |
3087 | btrfs_tree_unlock(node->eb); | |
3088 | node->locked = 0; | |
3089 | } | |
3090 | } | |
3091 | ||
3092 | void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node) | |
3093 | { | |
3094 | if (node->eb) { | |
3095 | btrfs_backref_unlock_node_buffer(node); | |
3096 | free_extent_buffer(node->eb); | |
3097 | node->eb = NULL; | |
3098 | } | |
3099 | } | |
3100 | ||
3101 | /* | |
3102 | * Drop the backref node from cache without cleaning up its child |
3103 | * edges. |
3104 | * |
3105 | * This can only be called on a node without parent edges. |
3106 | * The child edges are still kept as is. |
3107 | */ | |
3108 | void btrfs_backref_drop_node(struct btrfs_backref_cache *tree, | |
3109 | struct btrfs_backref_node *node) | |
3110 | { | |
3111 | ASSERT(list_empty(&node->upper)); | |
3112 | ||
3113 | btrfs_backref_drop_node_buffer(node); | |
3114 | list_del_init(&node->list); | |
3115 | list_del_init(&node->lower); | |
3116 | if (!RB_EMPTY_NODE(&node->rb_node)) | |
3117 | rb_erase(&node->rb_node, &tree->rb_root); | |
3118 | btrfs_backref_free_node(tree, node); | |
3119 | } | |
3120 | ||
023acb07 QW |
3121 | /* |
3122 | * Drop the backref node from cache, also cleaning up all its | |
3123 | * upper edges and any uncached nodes in the path. | |
3124 | * | |
3125 | * This cleanup happens bottom up, thus the node should either | |
3126 | * be the lowest node in the cache or a detached node. | |
3127 | */ | |
3128 | void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache, | |
3129 | struct btrfs_backref_node *node) | |
3130 | { | |
023acb07 QW |
3131 | struct btrfs_backref_edge *edge; |
3132 | ||
3133 | if (!node) | |
3134 | return; | |
3135 | ||
023acb07 | 3136 | while (!list_empty(&node->upper)) { |
2d44a15a DS |
3137 | edge = list_first_entry(&node->upper, struct btrfs_backref_edge, |
3138 | list[LOWER]); | |
023acb07 QW |
3139 | list_del(&edge->list[LOWER]); |
3140 | list_del(&edge->list[UPPER]); | |
3141 | btrfs_backref_free_edge(cache, edge); | |
023acb07 QW |
3142 | } |
3143 | ||
3144 | btrfs_backref_drop_node(cache, node); | |
3145 | } | |
13fe1bdb QW |
3146 | |
3147 | /* | |
3148 | * Release all nodes/edges from current cache | |
3149 | */ | |
3150 | void btrfs_backref_release_cache(struct btrfs_backref_cache *cache) | |
3151 | { | |
3152 | struct btrfs_backref_node *node; | |
13fe1bdb | 3153 | |
29e74a12 JB |
3154 | while ((node = rb_entry_safe(rb_first(&cache->rb_root), |
3155 | struct btrfs_backref_node, rb_node))) | |
13fe1bdb | 3156 | btrfs_backref_cleanup_node(cache, node); |
13fe1bdb | 3157 | |
13fe1bdb QW |
3158 | ASSERT(list_empty(&cache->pending_edge)); |
3159 | ASSERT(list_empty(&cache->useless_node)); | |
13fe1bdb QW |
3160 | ASSERT(!cache->nr_nodes); |
3161 | ASSERT(!cache->nr_edges); | |
3162 | } | |
1b60d2ec | 3163 | |
2aa756ec DS |
3164 | void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, |
3165 | struct btrfs_backref_node *lower, | |
3166 | struct btrfs_backref_node *upper, | |
3167 | int link_which) | |
3168 | { | |
3169 | ASSERT(upper && lower && upper->level == lower->level + 1); | |
3170 | edge->node[LOWER] = lower; | |
3171 | edge->node[UPPER] = upper; | |
3172 | if (link_which & LINK_LOWER) | |
3173 | list_add_tail(&edge->list[LOWER], &lower->upper); | |
3174 | if (link_which & LINK_UPPER) | |
3175 | list_add_tail(&edge->list[UPPER], &upper->lower); | |
3176 | } | |
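/*
 * Illustrative sketch, not part of this file: wiring one lower/upper pair
 * into a cache with the helpers above. Bytenrs and the level are assumed
 * values; real users also insert the nodes into cache->rb_root (see
 * btrfs_backref_finish_upper_links() below).
 */
static int example_link_pair(struct btrfs_backref_cache *cache,
			     u64 child_bytenr, u64 parent_bytenr, int level)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	lower = btrfs_backref_alloc_node(cache, child_bytenr, level);
	upper = btrfs_backref_alloc_node(cache, parent_bytenr, level + 1);
	edge = btrfs_backref_alloc_edge(cache);
	if (!lower || !upper || !edge) {
		btrfs_backref_free_edge(cache, edge);
		btrfs_backref_free_node(cache, upper);
		btrfs_backref_free_node(cache, lower);
		return -ENOMEM;
	}
	/* Both ends are known here, so link both directions at once. */
	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER | LINK_UPPER);
	return 0;
}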
1b60d2ec QW |
3177 | /* |
3178 | * Handle direct tree backref | |
3179 | * | |
3180 | * Direct tree backref means the backref item shows its parent bytenr |
3181 | * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined). | |
3182 | * | |
3183 | * @ref_key: The converted backref key. | |
3184 | * For keyed backref, it's the item key. | |
3185 | * For inlined backref, objectid is the bytenr, | |
3186 | * type is btrfs_inline_ref_type, offset is | |
3187 | * btrfs_inline_ref_offset. | |
3188 | */ | |
3189 | static int handle_direct_tree_backref(struct btrfs_backref_cache *cache, | |
3190 | struct btrfs_key *ref_key, | |
3191 | struct btrfs_backref_node *cur) | |
3192 | { | |
3193 | struct btrfs_backref_edge *edge; | |
3194 | struct btrfs_backref_node *upper; | |
3195 | struct rb_node *rb_node; | |
3196 | ||
3197 | ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); | |
3198 | ||
3199 | /* Only reloc root uses backref pointing to itself */ | |
3200 | if (ref_key->objectid == ref_key->offset) { | |
3201 | struct btrfs_root *root; | |
3202 | ||
3203 | cur->is_reloc_root = 1; | |
3204 | /* Only reloc backref cache cares about a specific root */ | |
3205 | if (cache->is_reloc) { | |
3206 | root = find_reloc_root(cache->fs_info, cur->bytenr); | |
f78743fb | 3207 | if (!root) |
1b60d2ec QW |
3208 | return -ENOENT; |
3209 | cur->root = root; | |
3210 | } else { | |
3211 | /* | |
3212 | * For generic purpose backref cache, reloc root node | |
3213 | * is useless. | |
3214 | */ | |
3215 | list_add(&cur->list, &cache->useless_node); | |
3216 | } | |
3217 | return 0; | |
3218 | } | |
3219 | ||
3220 | edge = btrfs_backref_alloc_edge(cache); | |
3221 | if (!edge) | |
3222 | return -ENOMEM; | |
3223 | ||
3224 | rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); | |
3225 | if (!rb_node) { | |
3226 | /* Parent node not yet cached */ | |
3227 | upper = btrfs_backref_alloc_node(cache, ref_key->offset, | |
3228 | cur->level + 1); | |
3229 | if (!upper) { | |
3230 | btrfs_backref_free_edge(cache, edge); | |
3231 | return -ENOMEM; | |
3232 | } | |
3233 | ||
3234 | /* | |
3235 | * Backrefs for the upper level block aren't cached, add the |
3236 | * block to pending list | |
3237 | */ | |
3238 | list_add_tail(&edge->list[UPPER], &cache->pending_edge); | |
3239 | } else { | |
3240 | /* Parent node already cached */ | |
3241 | upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node); | |
3242 | ASSERT(upper->checked); | |
3243 | INIT_LIST_HEAD(&edge->list[UPPER]); | |
3244 | } | |
3245 | btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); | |
3246 | return 0; | |
3247 | } | |
3248 | ||
3249 | /* | |
3250 | * Handle indirect tree backref | |
3251 | * | |
3252 | * Indirect tree backref means we only know which tree the node belongs to. |
3253 | * We still need to do a tree search to find out the parents. This is for | |
3254 | * TREE_BLOCK_REF backref (keyed or inlined). | |
3255 | * | |
eb96e221 | 3256 | * @trans: Transaction handle. |
1b60d2ec QW |
3257 | * @ref_key: The same as @ref_key in handle_direct_tree_backref() |
3258 | * @tree_key: The first key of this tree block. | |
1a9fd417 | 3259 | * @path: A clean (released) path, to avoid allocating path every time |
1b60d2ec QW |
3260 | * the function gets called. |
3261 | */ | |
eb96e221 FM |
3262 | static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans, |
3263 | struct btrfs_backref_cache *cache, | |
1b60d2ec QW |
3264 | struct btrfs_path *path, |
3265 | struct btrfs_key *ref_key, | |
3266 | struct btrfs_key *tree_key, | |
3267 | struct btrfs_backref_node *cur) | |
3268 | { | |
3269 | struct btrfs_fs_info *fs_info = cache->fs_info; | |
3270 | struct btrfs_backref_node *upper; | |
3271 | struct btrfs_backref_node *lower; | |
3272 | struct btrfs_backref_edge *edge; | |
3273 | struct extent_buffer *eb; | |
3274 | struct btrfs_root *root; | |
1b60d2ec QW |
3275 | struct rb_node *rb_node; |
3276 | int level; | |
3277 | bool need_check = true; | |
3278 | int ret; | |
3279 | ||
56e9357a | 3280 | root = btrfs_get_fs_root(fs_info, ref_key->offset, false); |
1b60d2ec QW |
3281 | if (IS_ERR(root)) |
3282 | return PTR_ERR(root); | |
4eb8064d JB |
3283 | |
3284 | /* We shouldn't be using backref cache for non-shareable roots. */ | |
3285 | if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) { | |
3286 | btrfs_put_root(root); | |
3287 | return -EUCLEAN; | |
3288 | } | |
1b60d2ec QW |
3289 | |
3290 | if (btrfs_root_level(&root->root_item) == cur->level) { | |
3291 | /* Tree root */ | |
3292 | ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); | |
876de781 QW |
3293 | /* |
3294 | * For reloc backref cache, we may ignore reloc root. But for | |
3295 | * general purpose backref cache, we can't rely on | |
3296 | * btrfs_should_ignore_reloc_root() as it may conflict with | |
3297 | * a currently running relocation and lead to a missing root. |
3298 | * | |
3299 | * For general purpose backref cache, reloc root detection is | |
3300 | * completely relying on direct backref (key->offset is parent | |
3301 | * bytenr), thus only do such check for reloc cache. | |
3302 | */ | |
3303 | if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { | |
1b60d2ec QW |
3304 | btrfs_put_root(root); |
3305 | list_add(&cur->list, &cache->useless_node); | |
3306 | } else { | |
3307 | cur->root = root; | |
3308 | } | |
3309 | return 0; | |
3310 | } | |
3311 | ||
3312 | level = cur->level + 1; | |
3313 | ||
3314 | /* Search the tree to find parent blocks referring to the block */ | |
3315 | path->search_commit_root = 1; | |
3316 | path->skip_locking = 1; | |
3317 | path->lowest_level = level; | |
3318 | ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0); | |
3319 | path->lowest_level = 0; | |
3320 | if (ret < 0) { | |
3321 | btrfs_put_root(root); | |
3322 | return ret; | |
3323 | } | |
3324 | if (ret > 0 && path->slots[level] > 0) | |
3325 | path->slots[level]--; | |
3326 | ||
3327 | eb = path->nodes[level]; | |
3328 | if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { | |
3329 | btrfs_err(fs_info, | |
3330 | "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", | |
e094f480 | 3331 | cur->bytenr, level - 1, btrfs_root_id(root), |
1b60d2ec QW |
3332 | tree_key->objectid, tree_key->type, tree_key->offset); |
3333 | btrfs_put_root(root); | |
3334 | ret = -ENOENT; | |
3335 | goto out; | |
3336 | } | |
3337 | lower = cur; | |
3338 | ||
3339 | /* Add all nodes and edges in the path */ | |
3340 | for (; level < BTRFS_MAX_LEVEL; level++) { | |
3341 | if (!path->nodes[level]) { | |
3342 | ASSERT(btrfs_root_bytenr(&root->root_item) == | |
3343 | lower->bytenr); | |
876de781 QW |
3344 | /* Same as previous should_ignore_reloc_root() call */ |
3345 | if (btrfs_should_ignore_reloc_root(root) && | |
3346 | cache->is_reloc) { | |
1b60d2ec QW |
3347 | btrfs_put_root(root); |
3348 | list_add(&lower->list, &cache->useless_node); | |
3349 | } else { | |
3350 | lower->root = root; | |
3351 | } | |
3352 | break; | |
3353 | } | |
3354 | ||
3355 | edge = btrfs_backref_alloc_edge(cache); | |
3356 | if (!edge) { | |
3357 | btrfs_put_root(root); | |
3358 | ret = -ENOMEM; | |
3359 | goto out; | |
3360 | } | |
3361 | ||
3362 | eb = path->nodes[level]; | |
3363 | rb_node = rb_simple_search(&cache->rb_root, eb->start); | |
3364 | if (!rb_node) { | |
3365 | upper = btrfs_backref_alloc_node(cache, eb->start, | |
3366 | lower->level + 1); | |
3367 | if (!upper) { | |
3368 | btrfs_put_root(root); | |
3369 | btrfs_backref_free_edge(cache, edge); | |
3370 | ret = -ENOMEM; | |
3371 | goto out; | |
3372 | } | |
3373 | upper->owner = btrfs_header_owner(eb); | |
4eb8064d JB |
3374 | |
3375 | /* We shouldn't be using backref cache for non-shareable roots. */ |
3376 | if (unlikely(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))) { | |
3377 | btrfs_put_root(root); | |
3378 | btrfs_backref_free_edge(cache, edge); | |
3379 | btrfs_backref_free_node(cache, upper); | |
3380 | ret = -EUCLEAN; | |
3381 | goto out; | |
3382 | } | |
1b60d2ec QW |
3383 | |
3384 | /* | |
3385 | * If we know the block isn't shared we can avoid | |
3386 | * checking its backrefs. | |
3387 | */ | |
eb96e221 | 3388 | if (btrfs_block_can_be_shared(trans, root, eb)) |
1b60d2ec QW |
3389 | upper->checked = 0; |
3390 | else | |
3391 | upper->checked = 1; | |
3392 | ||
3393 | /* | |
3394 | * Add the block to the pending list if we need to check its |
3395 | * backrefs. We only do this once while walking up a |
3396 | * tree as we will catch anything else later on. |
3397 | */ | |
3398 | if (!upper->checked && need_check) { | |
3399 | need_check = false; | |
3400 | list_add_tail(&edge->list[UPPER], | |
3401 | &cache->pending_edge); | |
3402 | } else { | |
3403 | if (upper->checked) | |
3404 | need_check = true; | |
3405 | INIT_LIST_HEAD(&edge->list[UPPER]); | |
3406 | } | |
3407 | } else { | |
3408 | upper = rb_entry(rb_node, struct btrfs_backref_node, | |
3409 | rb_node); | |
3410 | ASSERT(upper->checked); | |
3411 | INIT_LIST_HEAD(&edge->list[UPPER]); | |
3412 | if (!upper->owner) | |
3413 | upper->owner = btrfs_header_owner(eb); | |
3414 | } | |
3415 | btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); | |
3416 | ||
3417 | if (rb_node) { | |
3418 | btrfs_put_root(root); | |
3419 | break; | |
3420 | } | |
3421 | lower = upper; | |
3422 | upper = NULL; | |
3423 | } | |
3424 | out: | |
3425 | btrfs_release_path(path); | |
3426 | return ret; | |
3427 | } | |
3428 | ||
3429 | /* | |
3430 | * Add backref node @cur into @cache. | |
3431 | * | |
3432 | * NOTE: Even if the function returns 0, @cur is not yet cached as its upper |
3433 | * links aren't yet bi-directional. |
fc997ed0 | 3434 | * Use btrfs_backref_finish_upper_links() to finish such linkage. |
1b60d2ec | 3435 | * |
eb96e221 | 3436 | * @trans: Transaction handle. |
1b60d2ec QW |
3437 | * @path: Released path for indirect tree backref lookup |
3438 | * @iter: Released backref iter for extent tree search | |
3439 | * @node_key: The first key of the tree block | |
3440 | */ | |
eb96e221 FM |
3441 | int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans, |
3442 | struct btrfs_backref_cache *cache, | |
1b60d2ec QW |
3443 | struct btrfs_path *path, |
3444 | struct btrfs_backref_iter *iter, | |
3445 | struct btrfs_key *node_key, | |
3446 | struct btrfs_backref_node *cur) | |
3447 | { | |
1b60d2ec QW |
3448 | struct btrfs_backref_edge *edge; |
3449 | struct btrfs_backref_node *exist; | |
3450 | int ret; | |
3451 | ||
3452 | ret = btrfs_backref_iter_start(iter, cur->bytenr); | |
3453 | if (ret < 0) | |
3454 | return ret; | |
3455 | /* | |
3456 | * We skip the first btrfs_tree_block_info, as we don't use the key | |
3457 | * stored in it, but fetch it from the tree block | |
3458 | */ | |
3459 | if (btrfs_backref_has_tree_block_info(iter)) { | |
3460 | ret = btrfs_backref_iter_next(iter); | |
3461 | if (ret < 0) | |
3462 | goto out; | |
3463 | /* No extra backref? This means the tree block is corrupted */ | |
3464 | if (ret > 0) { | |
3465 | ret = -EUCLEAN; | |
3466 | goto out; | |
3467 | } | |
3468 | } | |
3469 | WARN_ON(cur->checked); | |
3470 | if (!list_empty(&cur->upper)) { | |
3471 | /* | |
3472 | * The backref was added previously when processing backref of | |
3473 | * type BTRFS_TREE_BLOCK_REF_KEY | |
3474 | */ | |
3475 | ASSERT(list_is_singular(&cur->upper)); | |
2d44a15a DS |
3476 | edge = list_first_entry(&cur->upper, struct btrfs_backref_edge, |
3477 | list[LOWER]); | |
1b60d2ec QW |
3478 | ASSERT(list_empty(&edge->list[UPPER])); |
3479 | exist = edge->node[UPPER]; | |
3480 | /* | |
3481 | * Add the upper level block to the pending list if we need to check |
3482 | * its backrefs | |
3483 | */ | |
3484 | if (!exist->checked) | |
3485 | list_add_tail(&edge->list[UPPER], &cache->pending_edge); | |
3486 | } else { | |
3487 | exist = NULL; | |
3488 | } | |
3489 | ||
3490 | for (; ret == 0; ret = btrfs_backref_iter_next(iter)) { | |
3491 | struct extent_buffer *eb; | |
3492 | struct btrfs_key key; | |
3493 | int type; | |
3494 | ||
3495 | cond_resched(); | |
ef923440 | 3496 | eb = iter->path->nodes[0]; |
1b60d2ec QW |
3497 | |
3498 | key.objectid = iter->bytenr; | |
3499 | if (btrfs_backref_iter_is_inline_ref(iter)) { | |
3500 | struct btrfs_extent_inline_ref *iref; | |
3501 | ||
3502 | /* Update key for inline backref */ | |
3503 | iref = (struct btrfs_extent_inline_ref *) | |
3504 | ((unsigned long)iter->cur_ptr); | |
3505 | type = btrfs_get_extent_inline_ref_type(eb, iref, | |
3506 | BTRFS_REF_TYPE_BLOCK); | |
3507 | if (type == BTRFS_REF_TYPE_INVALID) { | |
3508 | ret = -EUCLEAN; | |
3509 | goto out; | |
3510 | } | |
3511 | key.type = type; | |
3512 | key.offset = btrfs_extent_inline_ref_offset(eb, iref); | |
3513 | } else { | |
3514 | key.type = iter->cur_key.type; | |
3515 | key.offset = iter->cur_key.offset; | |
3516 | } | |
3517 | ||
3518 | /* | |
3519 | * Parent node found and matches current inline ref, no need to | |
3520 | * rebuild this node for this inline ref | |
3521 | */ | |
3522 | if (exist && | |
3523 | ((key.type == BTRFS_TREE_BLOCK_REF_KEY && | |
3524 | exist->owner == key.offset) || | |
3525 | (key.type == BTRFS_SHARED_BLOCK_REF_KEY && | |
3526 | exist->bytenr == key.offset))) { | |
3527 | exist = NULL; | |
3528 | continue; | |
3529 | } | |
3530 | ||
3531 | /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ | |
3532 | if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { | |
3533 | ret = handle_direct_tree_backref(cache, &key, cur); | |
3534 | if (ret < 0) | |
3535 | goto out; | |
182741d2 QW |
3536 | } else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) { |
3537 | /* | |
3538 | * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref |
3539 | * offset is the root objectid. We need to search the tree |
3540 | * to get the parent bytenr. |
3541 | */ | |
eb96e221 FM |
3542 | ret = handle_indirect_tree_backref(trans, cache, path, |
3543 | &key, node_key, cur); | |
182741d2 QW |
3544 | if (ret < 0) |
3545 | goto out; | |
1b60d2ec | 3546 | } |
1b60d2ec | 3547 | /* |
182741d2 QW |
3548 | * Unrecognized tree backref items (if they passed the tree-checker) |
3549 | * are ignored. |
1b60d2ec | 3550 | */ |
1b60d2ec QW |
3551 | } |
3552 | ret = 0; | |
3553 | cur->checked = 1; | |
3554 | WARN_ON(exist); | |
3555 | out: | |
3556 | btrfs_backref_iter_release(iter); | |
3557 | return ret; | |
3558 | } | |
fc997ed0 QW |
3559 | |
3560 | /* | |
3561 | * Finish the upwards linkage created by btrfs_backref_add_tree_node() | |
3562 | */ | |
3563 | int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, | |
3564 | struct btrfs_backref_node *start) | |
3565 | { | |
3566 | struct list_head *useless_node = &cache->useless_node; | |
3567 | struct btrfs_backref_edge *edge; | |
3568 | struct rb_node *rb_node; | |
3569 | LIST_HEAD(pending_edge); | |
3570 | ||
3571 | ASSERT(start->checked); | |
3572 | ||
4eb8064d JB |
3573 | rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, &start->rb_node); |
3574 | if (rb_node) | |
3575 | btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST); | |
fc997ed0 QW |
3576 | |
3577 | /* | |
3578 | * Use breadth-first search to iterate all related edges. |
3579 | * | |
3580 | * The starting points are all the edges of this node | |
3581 | */ | |
3582 | list_for_each_entry(edge, &start->upper, list[LOWER]) | |
3583 | list_add_tail(&edge->list[UPPER], &pending_edge); | |
3584 | ||
3585 | while (!list_empty(&pending_edge)) { | |
3586 | struct btrfs_backref_node *upper; | |
3587 | struct btrfs_backref_node *lower; | |
fc997ed0 QW |
3588 | |
3589 | edge = list_first_entry(&pending_edge, | |
3590 | struct btrfs_backref_edge, list[UPPER]); | |
3591 | list_del_init(&edge->list[UPPER]); | |
3592 | upper = edge->node[UPPER]; | |
3593 | lower = edge->node[LOWER]; | |
3594 | ||
3595 | /* Parent is detached, no need to keep any edges */ | |
3596 | if (upper->detached) { | |
3597 | list_del(&edge->list[LOWER]); | |
3598 | btrfs_backref_free_edge(cache, edge); | |
3599 | ||
3600 | /* Lower node is orphan, queue for cleanup */ | |
3601 | if (list_empty(&lower->upper)) | |
3602 | list_add(&lower->list, useless_node); | |
3603 | continue; | |
3604 | } | |
3605 | ||
3606 | /* | |
3607 | * All new nodes added in current build_backref_tree() haven't | |
3608 | * been linked to the cache rb tree. | |
3609 | * So if we have upper->rb_node populated, this means a cache | |
3610 | * hit. We only need to link the edge, as @upper and all its | |
3611 | * parents have already been linked. | |
3612 | */ | |
3613 | if (!RB_EMPTY_NODE(&upper->rb_node)) { | |
fc997ed0 QW |
3614 | list_add_tail(&edge->list[UPPER], &upper->lower); |
3615 | continue; | |
3616 | } | |
3617 | ||
3618 | /* Sanity check, we shouldn't have any unchecked nodes */ | |
3619 | if (!upper->checked) { | |
9e0a739a | 3620 | DEBUG_WARN("we should not have any unchecked nodes"); |
fc997ed0 QW |
3621 | return -EUCLEAN; |
3622 | } | |
3623 | ||
4eb8064d JB |
3624 | rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, |
3625 | &upper->rb_node); | |
3626 | if (unlikely(rb_node)) { | |
3627 | btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST); | |
fc997ed0 QW |
3628 | return -EUCLEAN; |
3629 | } | |
3630 | ||
fc997ed0 QW |
3631 | list_add_tail(&edge->list[UPPER], &upper->lower); |
3632 | ||
3633 | /* | |
3634 | * Also queue all the parent edges of this uncached node | |
3635 | * to finish the upper linkage | |
3636 | */ | |
3637 | list_for_each_entry(edge, &upper->upper, list[LOWER]) | |
3638 | list_add_tail(&edge->list[UPPER], &pending_edge); | |
3639 | } | |
3640 | return 0; | |
3641 | } | |
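/*
 * Illustrative sketch, not part of this file: the intended call sequence
 * around the helpers above, modelled on relocation's build_backref_tree().
 * The function name is made up; @node_key is the first key of the block at
 * @bytenr and is reused for every pending upper block, as the same key
 * leads to the same path at every level.
 */
static struct btrfs_backref_node *example_build_backref_node(
		struct btrfs_trans_handle *trans,
		struct btrfs_backref_cache *cache,
		struct btrfs_path *path,
		struct btrfs_backref_iter *iter,
		struct btrfs_key *node_key,
		u64 bytenr, int level)
{
	struct btrfs_backref_node *node;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_edge *edge;
	int ret;

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node)
		return ERR_PTR(-ENOMEM);

	/* Breadth-first search to resolve the node and all its upper blocks. */
	cur = node;
	do {
		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
						  node_key, cur);
		if (ret < 0)
			goto error;
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Only now can the LOWER links be made bi-directional. */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;

	return node;
error:
	btrfs_backref_error_cleanup(cache, node);
	return ERR_PTR(ret);
}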
1b23ea18 QW |
3642 | |
3643 | void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache, | |
3644 | struct btrfs_backref_node *node) | |
3645 | { | |
3646 | struct btrfs_backref_node *lower; | |
3647 | struct btrfs_backref_node *upper; | |
3648 | struct btrfs_backref_edge *edge; | |
3649 | ||
3650 | while (!list_empty(&cache->useless_node)) { | |
3651 | lower = list_first_entry(&cache->useless_node, | |
3652 | struct btrfs_backref_node, list); | |
3653 | list_del_init(&lower->list); | |
3654 | } | |
3655 | while (!list_empty(&cache->pending_edge)) { | |
3656 | edge = list_first_entry(&cache->pending_edge, | |
3657 | struct btrfs_backref_edge, list[UPPER]); | |
3658 | list_del(&edge->list[UPPER]); | |
3659 | list_del(&edge->list[LOWER]); | |
3660 | lower = edge->node[LOWER]; | |
3661 | upper = edge->node[UPPER]; | |
3662 | btrfs_backref_free_edge(cache, edge); | |
3663 | ||
3664 | /* | |
3665 | * Lower is no longer linked to any upper backref nodes and | |
3666 | * isn't in the cache, so we can free it ourselves. |
3667 | */ | |
3668 | if (list_empty(&lower->upper) && | |
3669 | RB_EMPTY_NODE(&lower->rb_node)) | |
3670 | list_add(&lower->list, &cache->useless_node); | |
3671 | ||
3672 | if (!RB_EMPTY_NODE(&upper->rb_node)) | |
3673 | continue; | |
3674 | ||
3675 | /* Add this guy's upper edges to the list to process */ | |
3676 | list_for_each_entry(edge, &upper->upper, list[LOWER]) | |
3677 | list_add_tail(&edge->list[UPPER], | |
3678 | &cache->pending_edge); | |
3679 | if (list_empty(&upper->upper)) | |
3680 | list_add(&upper->list, &cache->useless_node); | |
3681 | } | |
3682 | ||
3683 | while (!list_empty(&cache->useless_node)) { | |
3684 | lower = list_first_entry(&cache->useless_node, | |
3685 | struct btrfs_backref_node, list); | |
3686 | list_del_init(&lower->list); | |
3687 | if (lower == node) | |
3688 | node = NULL; | |
49ecc679 | 3689 | btrfs_backref_drop_node(cache, lower); |
1b23ea18 QW |
3690 | } |
3691 | ||
3692 | btrfs_backref_cleanup_node(cache, node); | |
3693 | ASSERT(list_empty(&cache->useless_node) && | |
3694 | list_empty(&cache->pending_edge)); | |
3695 | } |