// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

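/*
 * Check whether the given file extent item actually covers @extent_item_pos
 * and, if it does (or if offsets are ignored), record the inode number and
 * file offset by prepending a new extent_inode_elem to the @eie list.
 *
 * Returns 0 on success, 1 if the position falls outside the item's range,
 * or -ENOMEM on allocation failure.
 */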
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

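/*
 * Scan all items of @eb for file extent items referencing
 * @wanted_disk_byte and record the matching inodes via
 * check_extent_in_eb().
 */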
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need the
	 * key. Thus, we must look into all items and see whether we find one
	 * (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

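/*
 * One rb-tree of prelim_refs plus a count of its entries; entries are
 * kept sorted and merged on insertion by prelim_ref_insert().
 */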
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

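/* Slab cache for prelim_ref allocations, created in btrfs_prelim_ref_init(). */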
static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

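/*
 * Keep sc->share_count in sync with the number of prelim_refs whose
 * count is positive: bump it when a ref's count goes from <1 to >0 and
 * drop it on the opposite transition.
 */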
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * The rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

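/*
 * Search the direct preftree for a ref with a parent matching @bytenr,
 * i.e. check whether this leaf is referenced through a shared data
 * backref. Returns 1 on a match, 0 otherwise.
 */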
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

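/*
 * Collect every parent of ref->wanted_disk_byte into the @parents ulist.
 * For upper tree levels this is just the node the path points to; for
 * leaves we walk the file extent items that still reference the wanted
 * disk bytenr and, when @extent_item_pos is given, attach the matching
 * inode list to each parent.
 */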
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing
	 *    to the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for a normal backref but the bytenr of this
	 *    leaf matches a shared data backref.
	 * 3. The leaf owner is not equal to the root we are searching for.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of
		 * this leaf matches a shared data backref, OR the leaf owner
		 * is not equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a
	 * lot of places, cause backwards incompatibility and would not fix
	 * the problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate ENOENT; any other error should be
		 * caught and returned directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list.
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and
 * behaves much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned. If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
					*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must
				 * extend its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

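/*
 * Free the extent_inode_elem list attached as aux data to each node of
 * the @blocks ulist, then free the ulist itself.
 */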
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

9e351cc8 JB |
1478 | int btrfs_find_all_roots(struct btrfs_trans_handle *trans, |
1479 | struct btrfs_fs_info *fs_info, u64 bytenr, | |
c995ab3c ZB |
1480 | u64 time_seq, struct ulist **roots, |
1481 | bool ignore_offset) | |
9e351cc8 JB |
1482 | { |
1483 | int ret; | |
1484 | ||
1485 | if (!trans) | |
1486 | down_read(&fs_info->commit_root_sem); | |
e0c476b1 | 1487 | ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr, |
c995ab3c | 1488 | time_seq, roots, ignore_offset); |
9e351cc8 JB |
1489 | if (!trans) |
1490 | up_read(&fs_info->commit_root_sem); | |
1491 | return ret; | |
1492 | } | |
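/*
 * Illustrative sketch, not part of this file: consuming the @roots ulist
 * filled in by btrfs_find_all_roots(). print_roots_of_extent() is a
 * hypothetical name.
 */
static int print_roots_of_extent(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct ulist *roots = NULL;
	int ret;

	/* NULL trans: the wrapper takes commit_root_sem for us */
	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &roots, false);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(roots, &uiter)))
		btrfs_info(fs_info, "extent %llu is referenced by root %llu",
			   bytenr, node->val);
	ulist_free(roots);
	return 0;
}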
1493 | ||
2c2ed5aa MF |
1494 | /** |
1495 | * btrfs_check_shared - tell us whether an extent is shared | |
1496 | * | |
2c2ed5aa MF |
1497 | * btrfs_check_shared uses the backref walking code but will short |
1498 | * circuit as soon as it finds a root or inode that doesn't match the | |
1499 | * one passed in. This provides a significant performance benefit for | |
1500 | * callers (such as fiemap) which want to know whether the extent is | |
1501 | * shared but do not need a ref count. | |
1502 | * | |
03628cdb FM |
1503 | * This attempts to attach to the running transaction in order to account for |
1504 | * delayed refs, but continues on even when no running transaction exists. | |
bb739cf0 | 1505 | * |
2c2ed5aa MF |
1506 | * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. |
1507 | */ | |
5911c8fe DS |
1508 | int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, |
1509 | struct ulist *roots, struct ulist *tmp) | |
dc046b10 | 1510 | { |
bb739cf0 EN |
1511 | struct btrfs_fs_info *fs_info = root->fs_info; |
1512 | struct btrfs_trans_handle *trans; | |
dc046b10 JB |
1513 | struct ulist_iterator uiter; |
1514 | struct ulist_node *node; | |
3284da7b | 1515 | struct seq_list elem = SEQ_LIST_INIT(elem); |
dc046b10 | 1516 | int ret = 0; |
3ec4d323 | 1517 | struct share_check shared = { |
4fd786e6 | 1518 | .root_objectid = root->root_key.objectid, |
3ec4d323 EN |
1519 | .inum = inum, |
1520 | .share_count = 0, | |
1521 | }; | |
dc046b10 | 1522 | |
5911c8fe DS |
1523 | ulist_init(roots); |
1524 | ulist_init(tmp); | |
dc046b10 | 1525 | |
a6d155d2 | 1526 | trans = btrfs_join_transaction_nostart(root); |
bb739cf0 | 1527 | if (IS_ERR(trans)) { |
03628cdb FM |
1528 | if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { |
1529 | ret = PTR_ERR(trans); | |
1530 | goto out; | |
1531 | } | |
bb739cf0 | 1532 | trans = NULL; |
dc046b10 | 1533 | down_read(&fs_info->commit_root_sem); |
bb739cf0 EN |
1534 | } else { |
1535 | btrfs_get_tree_mod_seq(fs_info, &elem); | |
1536 | } | |
1537 | ||
dc046b10 JB |
1538 | ULIST_ITER_INIT(&uiter); |
1539 | while (1) { | |
1540 | ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp, | |
c995ab3c | 1541 | roots, NULL, &shared, false); |
dc046b10 | 1542 | if (ret == BACKREF_FOUND_SHARED) { |
2c2ed5aa | 1543 | /* this is the only condition under which we return 1 */ |
dc046b10 JB |
1544 | ret = 1; |
1545 | break; | |
1546 | } | |
1547 | if (ret < 0 && ret != -ENOENT) | |
1548 | break; | |
2c2ed5aa | 1549 | ret = 0; |
dc046b10 JB |
1550 | node = ulist_next(tmp, &uiter); |
1551 | if (!node) | |
1552 | break; | |
1553 | bytenr = node->val; | |
18bf591b | 1554 | shared.share_count = 0; |
dc046b10 JB |
1555 | cond_resched(); |
1556 | } | |
bb739cf0 EN |
1557 | |
1558 | if (trans) { | |
dc046b10 | 1559 | btrfs_put_tree_mod_seq(fs_info, &elem); |
bb739cf0 EN |
1560 | btrfs_end_transaction(trans); |
1561 | } else { | |
dc046b10 | 1562 | up_read(&fs_info->commit_root_sem); |
bb739cf0 | 1563 | } |
03628cdb | 1564 | out: |
5911c8fe DS |
1565 | ulist_release(roots); |
1566 | ulist_release(tmp); | |
dc046b10 JB |
1567 | return ret; |
1568 | } | |
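/*
 * Illustrative sketch, not part of this file: how a fiemap-style caller
 * might use btrfs_check_shared(). mark_if_shared() is a hypothetical name.
 * The two ulists are caller-provided so they can be reused across many
 * extents; btrfs_check_shared() reinitializes and releases them on every
 * call.
 */
static int mark_if_shared(struct btrfs_inode *inode, u64 disk_bytenr,
			  struct ulist *roots, struct ulist *tmp, bool *shared)
{
	int ret;

	ret = btrfs_check_shared(inode->root, btrfs_ino(inode), disk_bytenr,
				 roots, tmp);
	if (ret < 0)
		return ret;
	*shared = (ret == 1); /* 1 is the only "shared" return value */
	return 0;
}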
1569 | ||
f186373f MF |
1570 | int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, |
1571 | u64 start_off, struct btrfs_path *path, | |
1572 | struct btrfs_inode_extref **ret_extref, | |
1573 | u64 *found_off) | |
1574 | { | |
1575 | int ret, slot; | |
1576 | struct btrfs_key key; | |
1577 | struct btrfs_key found_key; | |
1578 | struct btrfs_inode_extref *extref; | |
73980bec | 1579 | const struct extent_buffer *leaf; |
f186373f MF |
1580 | unsigned long ptr; |
1581 | ||
1582 | key.objectid = inode_objectid; | |
962a298f | 1583 | key.type = BTRFS_INODE_EXTREF_KEY; |
f186373f MF |
1584 | key.offset = start_off; |
1585 | ||
1586 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
1587 | if (ret < 0) | |
1588 | return ret; | |
1589 | ||
1590 | while (1) { | |
1591 | leaf = path->nodes[0]; | |
1592 | slot = path->slots[0]; | |
1593 | if (slot >= btrfs_header_nritems(leaf)) { | |
1594 | /* | |
1595 | * If the item at offset is not found, | |
1596 | * btrfs_search_slot will point us to the slot | |
1597 | * where it should be inserted. In our case | |
1598 | * that will be the slot directly before the | |
1599 | * next BTRFS_INODE_EXTREF_KEY item. In the case | |
1600 | * that we're pointing to the last slot in a | |
1601 | * leaf, we must move one leaf over. | |
1602 | */ | |
1603 | ret = btrfs_next_leaf(root, path); | |
1604 | if (ret) { | |
1605 | if (ret >= 1) | |
1606 | ret = -ENOENT; | |
1607 | break; | |
1608 | } | |
1609 | continue; | |
1610 | } | |
1611 | ||
1612 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
1613 | ||
1614 | /* | |
1615 | * Check that we're still looking at an extended ref key for | |
1616 | * this particular objectid. If we have different | |
1617 | * objectid or type then there are no more to be found | |
1618 | * in the tree and we can exit. | |
1619 | */ | |
1620 | ret = -ENOENT; | |
1621 | if (found_key.objectid != inode_objectid) | |
1622 | break; | |
962a298f | 1623 | if (found_key.type != BTRFS_INODE_EXTREF_KEY) |
f186373f MF |
1624 | break; |
1625 | ||
1626 | ret = 0; | |
1627 | ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
1628 | extref = (struct btrfs_inode_extref *)ptr; | |
1629 | *ret_extref = extref; | |
1630 | if (found_off) | |
1631 | *found_off = found_key.offset; | |
1632 | break; | |
1633 | } | |
1634 | ||
1635 | return ret; | |
1636 | } | |
1637 | ||
48a3b636 ES |
1638 | /* |
1639 | * this iterates to turn a name (from iref/extref) into a full filesystem path. | |
1640 | * Elements of the path are separated by '/' and the path is guaranteed to be | |
1641 | * 0-terminated. the path is only given within the current file system. | |
1642 | * Therefore, it never starts with a '/'. the caller is responsible for providing | |
1643 | * "size" bytes in "dest". the dest buffer will be filled backwards. finally, | |
1644 | * the start point of the resulting string is returned. this pointer is within | |
1645 | * dest, normally. | |
1646 | * in case the path buffer would overflow, the pointer is decremented further | |
1647 | * as if output was written to the buffer, though no more output is actually | |
1648 | * generated. that way, the caller can determine how much space would be | |
1649 | * required for the path to fit into the buffer. in that case, the returned | |
1650 | * value will be smaller than dest. callers must check this! | |
1651 | */ | |
96b5bd77 JS |
1652 | char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, |
1653 | u32 name_len, unsigned long name_off, | |
1654 | struct extent_buffer *eb_in, u64 parent, | |
1655 | char *dest, u32 size) | |
a542ad1b | 1656 | { |
a542ad1b JS |
1657 | int slot; |
1658 | u64 next_inum; | |
1659 | int ret; | |
661bec6b | 1660 | s64 bytes_left = ((s64)size) - 1; |
a542ad1b JS |
1661 | struct extent_buffer *eb = eb_in; |
1662 | struct btrfs_key found_key; | |
b916a59a | 1663 | int leave_spinning = path->leave_spinning; |
d24bec3a | 1664 | struct btrfs_inode_ref *iref; |
a542ad1b JS |
1665 | |
1666 | if (bytes_left >= 0) | |
1667 | dest[bytes_left] = '\0'; | |
1668 | ||
b916a59a | 1669 | path->leave_spinning = 1; |
a542ad1b | 1670 | while (1) { |
d24bec3a | 1671 | bytes_left -= name_len; |
a542ad1b JS |
1672 | if (bytes_left >= 0) |
1673 | read_extent_buffer(eb, dest + bytes_left, | |
d24bec3a | 1674 | name_off, name_len); |
b916a59a | 1675 | if (eb != eb_in) { |
0c0fe3b0 FM |
1676 | if (!path->skip_locking) |
1677 | btrfs_tree_read_unlock_blocking(eb); | |
a542ad1b | 1678 | free_extent_buffer(eb); |
b916a59a | 1679 | } |
c234a24d DS |
1680 | ret = btrfs_find_item(fs_root, path, parent, 0, |
1681 | BTRFS_INODE_REF_KEY, &found_key); | |
8f24b496 JS |
1682 | if (ret > 0) |
1683 | ret = -ENOENT; | |
a542ad1b JS |
1684 | if (ret) |
1685 | break; | |
d24bec3a | 1686 | |
a542ad1b JS |
1687 | next_inum = found_key.offset; |
1688 | ||
1689 | /* regular exit ahead */ | |
1690 | if (parent == next_inum) | |
1691 | break; | |
1692 | ||
1693 | slot = path->slots[0]; | |
1694 | eb = path->nodes[0]; | |
1695 | /* make sure we can use eb after releasing the path */ | |
b916a59a | 1696 | if (eb != eb_in) { |
0c0fe3b0 | 1697 | if (!path->skip_locking) |
300aa896 | 1698 | btrfs_set_lock_blocking_read(eb); |
0c0fe3b0 FM |
1699 | path->nodes[0] = NULL; |
1700 | path->locks[0] = 0; | |
b916a59a | 1701 | } |
a542ad1b | 1702 | btrfs_release_path(path); |
a542ad1b | 1703 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); |
d24bec3a MF |
1704 | |
1705 | name_len = btrfs_inode_ref_name_len(eb, iref); | |
1706 | name_off = (unsigned long)(iref + 1); | |
1707 | ||
a542ad1b JS |
1708 | parent = next_inum; |
1709 | --bytes_left; | |
1710 | if (bytes_left >= 0) | |
1711 | dest[bytes_left] = '/'; | |
1712 | } | |
1713 | ||
1714 | btrfs_release_path(path); | |
b916a59a | 1715 | path->leave_spinning = leave_spinning; |
a542ad1b JS |
1716 | |
1717 | if (ret) | |
1718 | return ERR_PTR(ret); | |
1719 | ||
1720 | return dest + bytes_left; | |
1721 | } | |
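/*
 * Illustrative sketch, not part of this file: interpreting the pointer
 * returned by btrfs_ref_to_path(). name_to_path_checked() is a hypothetical
 * name. Because the buffer is filled backwards, a returned pointer below
 * @dest signals overflow, and (dest - start) is exactly the number of
 * additional bytes that would have been needed.
 */
static int name_to_path_checked(struct btrfs_root *fs_root,
				struct btrfs_path *path, u32 name_len,
				unsigned long name_off,
				struct extent_buffer *eb_in, u64 parent,
				char *dest, u32 size)
{
	char *start;

	start = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb_in,
				  parent, dest, size);
	if (IS_ERR(start))
		return PTR_ERR(start);
	if (start < dest)
		return -ENAMETOOLONG; /* (dest - start) extra bytes needed */

	/* @start now points at the 0-terminated path inside @dest */
	return 0;
}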
1722 | ||
1723 | /* | |
1724 | * this makes the path point to (logical EXTENT_ITEM *) | |
1725 | * stores BTRFS_EXTENT_FLAG_DATA for data or BTRFS_EXTENT_FLAG_TREE_BLOCK | |
1726 | * for tree blocks in *flags_ret. returns 0 on success and <0 on error. | |
1727 | */ | |
1728 | int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, | |
69917e43 LB |
1729 | struct btrfs_path *path, struct btrfs_key *found_key, |
1730 | u64 *flags_ret) | |
a542ad1b JS |
1731 | { |
1732 | int ret; | |
1733 | u64 flags; | |
261c84b6 | 1734 | u64 size = 0; |
a542ad1b | 1735 | u32 item_size; |
73980bec | 1736 | const struct extent_buffer *eb; |
a542ad1b JS |
1737 | struct btrfs_extent_item *ei; |
1738 | struct btrfs_key key; | |
1739 | ||
261c84b6 JB |
1740 | if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) |
1741 | key.type = BTRFS_METADATA_ITEM_KEY; | |
1742 | else | |
1743 | key.type = BTRFS_EXTENT_ITEM_KEY; | |
a542ad1b JS |
1744 | key.objectid = logical; |
1745 | key.offset = (u64)-1; | |
1746 | ||
1747 | ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); | |
1748 | if (ret < 0) | |
1749 | return ret; | |
a542ad1b | 1750 | |
850a8cdf WS |
1751 | ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0); |
1752 | if (ret) { | |
1753 | if (ret > 0) | |
1754 | ret = -ENOENT; | |
1755 | return ret; | |
580f0a67 | 1756 | } |
850a8cdf | 1757 | btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); |
261c84b6 | 1758 | if (found_key->type == BTRFS_METADATA_ITEM_KEY) |
da17066c | 1759 | size = fs_info->nodesize; |
261c84b6 JB |
1760 | else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) |
1761 | size = found_key->offset; | |
1762 | ||
580f0a67 | 1763 | if (found_key->objectid > logical || |
261c84b6 | 1764 | found_key->objectid + size <= logical) { |
ab8d0fc4 JM |
1765 | btrfs_debug(fs_info, |
1766 | "logical %llu is not within any extent", logical); | |
a542ad1b | 1767 | return -ENOENT; |
4692cf58 | 1768 | } |
a542ad1b JS |
1769 | |
1770 | eb = path->nodes[0]; | |
1771 | item_size = btrfs_item_size_nr(eb, path->slots[0]); | |
1772 | BUG_ON(item_size < sizeof(*ei)); | |
1773 | ||
1774 | ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); | |
1775 | flags = btrfs_extent_flags(eb, ei); | |
1776 | ||
ab8d0fc4 JM |
1777 | btrfs_debug(fs_info, |
1778 | "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u", | |
c1c9ff7c GU |
1779 | logical, logical - found_key->objectid, found_key->objectid, |
1780 | found_key->offset, flags, item_size); | |
69917e43 LB |
1781 | |
1782 | WARN_ON(!flags_ret); | |
1783 | if (flags_ret) { | |
1784 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) | |
1785 | *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK; | |
1786 | else if (flags & BTRFS_EXTENT_FLAG_DATA) | |
1787 | *flags_ret = BTRFS_EXTENT_FLAG_DATA; | |
1788 | else | |
290342f6 | 1789 | BUG(); |
69917e43 LB |
1790 | return 0; |
1791 | } | |
a542ad1b JS |
1792 | |
1793 | return -EIO; | |
1794 | } | |
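/*
 * Illustrative sketch, not part of this file: classifying the extent that
 * covers a logical address. classify_logical() is a hypothetical name.
 */
static int classify_logical(struct btrfs_fs_info *fs_info, u64 logical,
			    struct btrfs_path *path)
{
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		btrfs_info(fs_info, "%llu lies in a tree block extent",
			   logical);
	else
		btrfs_info(fs_info, "%llu lies in a data extent", logical);
	return 0;
}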
1795 | ||
1796 | /* | |
1797 | * helper function to iterate extent inline refs. ptr must point to a 0 value | |
1798 | * for the first call and may be modified. it is used to track state. | |
1799 | * if more refs exist, 0 is returned and the next call to | |
e0c476b1 | 1800 | * get_extent_inline_ref must pass the modified ptr parameter to get the |
a542ad1b JS |
1801 | * next ref. after the last ref has been processed, 1 is returned. |
1802 | * returns <0 on error | |
1803 | */ | |
e0c476b1 JM |
1804 | static int get_extent_inline_ref(unsigned long *ptr, |
1805 | const struct extent_buffer *eb, | |
1806 | const struct btrfs_key *key, | |
1807 | const struct btrfs_extent_item *ei, | |
1808 | u32 item_size, | |
1809 | struct btrfs_extent_inline_ref **out_eiref, | |
1810 | int *out_type) | |
a542ad1b JS |
1811 | { |
1812 | unsigned long end; | |
1813 | u64 flags; | |
1814 | struct btrfs_tree_block_info *info; | |
1815 | ||
1816 | if (!*ptr) { | |
1817 | /* first call */ | |
1818 | flags = btrfs_extent_flags(eb, ei); | |
1819 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | |
6eda71d0 LB |
1820 | if (key->type == BTRFS_METADATA_ITEM_KEY) { |
1821 | /* a skinny metadata extent */ | |
1822 | *out_eiref = | |
1823 | (struct btrfs_extent_inline_ref *)(ei + 1); | |
1824 | } else { | |
1825 | WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); | |
1826 | info = (struct btrfs_tree_block_info *)(ei + 1); | |
1827 | *out_eiref = | |
1828 | (struct btrfs_extent_inline_ref *)(info + 1); | |
1829 | } | |
a542ad1b JS |
1830 | } else { |
1831 | *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1); | |
1832 | } | |
1833 | *ptr = (unsigned long)*out_eiref; | |
cd857dd6 | 1834 | if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size) |
a542ad1b JS |
1835 | return -ENOENT; |
1836 | } | |
1837 | ||
1838 | end = (unsigned long)ei + item_size; | |
6eda71d0 | 1839 | *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr); |
3de28d57 LB |
1840 | *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref, |
1841 | BTRFS_REF_TYPE_ANY); | |
1842 | if (*out_type == BTRFS_REF_TYPE_INVALID) | |
af431dcb | 1843 | return -EUCLEAN; |
a542ad1b JS |
1844 | |
1845 | *ptr += btrfs_extent_inline_ref_size(*out_type); | |
1846 | WARN_ON(*ptr > end); | |
1847 | if (*ptr == end) | |
1848 | return 1; /* last */ | |
1849 | ||
1850 | return 0; | |
1851 | } | |
1852 | ||
1853 | /* | |
1854 | * reads the tree block backref for an extent. tree level and root are returned | |
1855 | * through out_level and out_root. ptr must point to a 0 value for the first | |
e0c476b1 | 1856 | * call and may be modified (see get_extent_inline_ref comment). |
a542ad1b JS |
1857 | * returns 0 if data was provided, 1 if there was no more data to provide or |
1858 | * <0 on error. | |
1859 | */ | |
1860 | int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, | |
6eda71d0 LB |
1861 | struct btrfs_key *key, struct btrfs_extent_item *ei, |
1862 | u32 item_size, u64 *out_root, u8 *out_level) | |
a542ad1b JS |
1863 | { |
1864 | int ret; | |
1865 | int type; | |
a542ad1b JS |
1866 | struct btrfs_extent_inline_ref *eiref; |
1867 | ||
1868 | if (*ptr == (unsigned long)-1) | |
1869 | return 1; | |
1870 | ||
1871 | while (1) { | |
e0c476b1 | 1872 | ret = get_extent_inline_ref(ptr, eb, key, ei, item_size, |
6eda71d0 | 1873 | &eiref, &type); |
a542ad1b JS |
1874 | if (ret < 0) |
1875 | return ret; | |
1876 | ||
1877 | if (type == BTRFS_TREE_BLOCK_REF_KEY || | |
1878 | type == BTRFS_SHARED_BLOCK_REF_KEY) | |
1879 | break; | |
1880 | ||
1881 | if (ret == 1) | |
1882 | return 1; | |
1883 | } | |
1884 | ||
1885 | /* we can treat both ref types equally here */ | |
a542ad1b | 1886 | *out_root = btrfs_extent_inline_ref_offset(eb, eiref); |
a1317f45 FM |
1887 | |
1888 | if (key->type == BTRFS_EXTENT_ITEM_KEY) { | |
1889 | struct btrfs_tree_block_info *info; | |
1890 | ||
1891 | info = (struct btrfs_tree_block_info *)(ei + 1); | |
1892 | *out_level = btrfs_tree_block_level(eb, info); | |
1893 | } else { | |
1894 | ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); | |
1895 | *out_level = (u8)key->offset; | |
1896 | } | |
a542ad1b JS |
1897 | |
1898 | if (ret == 1) | |
1899 | *ptr = (unsigned long)-1; | |
1900 | ||
1901 | return 0; | |
1902 | } | |
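/*
 * Illustrative sketch, not part of this file: draining all tree block
 * backrefs of a metadata extent with tree_backref_for_extent().
 * walk_tree_backrefs() is a hypothetical name; @eb, @key, @ei and
 * @item_size would come from a prior extent_from_logical() lookup, the way
 * scrub does it.
 */
static int walk_tree_backrefs(struct btrfs_fs_info *fs_info,
			      struct extent_buffer *eb, struct btrfs_key *key,
			      struct btrfs_extent_item *ei, u32 item_size)
{
	unsigned long ptr = 0;	/* must start at 0; tracks iteration state */
	u64 root;
	u8 level;
	int ret;

	while (1) {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret > 0)
			break;	/* all backrefs processed */
		btrfs_info(fs_info, "referenced from root %llu at level %u",
			   root, level);
	}
	return 0;
}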
1903 | ||
ab8d0fc4 JM |
1904 | static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, |
1905 | struct extent_inode_elem *inode_list, | |
1906 | u64 root, u64 extent_item_objectid, | |
1907 | iterate_extent_inodes_t *iterate, void *ctx) | |
a542ad1b | 1908 | { |
976b1908 | 1909 | struct extent_inode_elem *eie; |
4692cf58 | 1910 | int ret = 0; |
4692cf58 | 1911 | |
976b1908 | 1912 | for (eie = inode_list; eie; eie = eie->next) { |
ab8d0fc4 JM |
1913 | btrfs_debug(fs_info, |
1914 | "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu", | |
1915 | extent_item_objectid, eie->inum, | |
1916 | eie->offset, root); | |
976b1908 | 1917 | ret = iterate(eie->inum, eie->offset, root, ctx); |
4692cf58 | 1918 | if (ret) { |
ab8d0fc4 JM |
1919 | btrfs_debug(fs_info, |
1920 | "stopping iteration for %llu due to ret=%d", | |
1921 | extent_item_objectid, ret); | |
4692cf58 JS |
1922 | break; |
1923 | } | |
a542ad1b JS |
1924 | } |
1925 | ||
a542ad1b JS |
1926 | return ret; |
1927 | } | |
1928 | ||
1929 | /* | |
1930 | * calls iterate() for every inode that references the extent identified by | |
4692cf58 | 1931 | * the given parameters. |
a542ad1b JS |
1932 | * when the iterator function returns a non-zero value, iteration stops. |
1933 | */ | |
1934 | int iterate_extent_inodes(struct btrfs_fs_info *fs_info, | |
4692cf58 | 1935 | u64 extent_item_objectid, u64 extent_item_pos, |
7a3ae2f8 | 1936 | int search_commit_root, |
c995ab3c ZB |
1937 | iterate_extent_inodes_t *iterate, void *ctx, |
1938 | bool ignore_offset) | |
a542ad1b | 1939 | { |
a542ad1b | 1940 | int ret; |
da61d31a | 1941 | struct btrfs_trans_handle *trans = NULL; |
7a3ae2f8 JS |
1942 | struct ulist *refs = NULL; |
1943 | struct ulist *roots = NULL; | |
4692cf58 JS |
1944 | struct ulist_node *ref_node = NULL; |
1945 | struct ulist_node *root_node = NULL; | |
3284da7b | 1946 | struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem); |
cd1b413c JS |
1947 | struct ulist_iterator ref_uiter; |
1948 | struct ulist_iterator root_uiter; | |
a542ad1b | 1949 | |
ab8d0fc4 | 1950 | btrfs_debug(fs_info, "resolving all inodes for extent %llu", |
4692cf58 | 1951 | extent_item_objectid); |
a542ad1b | 1952 | |
da61d31a | 1953 | if (!search_commit_root) { |
bfc61c36 FM |
1954 | trans = btrfs_attach_transaction(fs_info->extent_root); |
1955 | if (IS_ERR(trans)) { | |
1956 | if (PTR_ERR(trans) != -ENOENT && | |
1957 | PTR_ERR(trans) != -EROFS) | |
1958 | return PTR_ERR(trans); | |
1959 | trans = NULL; | |
1960 | } | |
1961 | } | |
1962 | ||
1963 | if (trans) | |
8445f61c | 1964 | btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem); |
bfc61c36 | 1965 | else |
9e351cc8 | 1966 | down_read(&fs_info->commit_root_sem); |
a542ad1b | 1967 | |
4692cf58 | 1968 | ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, |
097b8a7c | 1969 | tree_mod_seq_elem.seq, &refs, |
c995ab3c | 1970 | &extent_item_pos, ignore_offset); |
4692cf58 JS |
1971 | if (ret) |
1972 | goto out; | |
a542ad1b | 1973 | |
cd1b413c JS |
1974 | ULIST_ITER_INIT(&ref_uiter); |
1975 | while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) { | |
e0c476b1 | 1976 | ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val, |
c995ab3c ZB |
1977 | tree_mod_seq_elem.seq, &roots, |
1978 | ignore_offset); | |
4692cf58 JS |
1979 | if (ret) |
1980 | break; | |
cd1b413c JS |
1981 | ULIST_ITER_INIT(&root_uiter); |
1982 | while (!ret && (root_node = ulist_next(roots, &root_uiter))) { | |
ab8d0fc4 JM |
1983 | btrfs_debug(fs_info, |
1984 | "root %llu references leaf %llu, data list %#llx", | |
1985 | root_node->val, ref_node->val, | |
1986 | ref_node->aux); | |
1987 | ret = iterate_leaf_refs(fs_info, | |
1988 | (struct extent_inode_elem *) | |
995e01b7 JS |
1989 | (uintptr_t)ref_node->aux, |
1990 | root_node->val, | |
1991 | extent_item_objectid, | |
1992 | iterate, ctx); | |
4692cf58 | 1993 | } |
976b1908 | 1994 | ulist_free(roots); |
a542ad1b JS |
1995 | } |
1996 | ||
976b1908 | 1997 | free_leaf_list(refs); |
4692cf58 | 1998 | out: |
bfc61c36 | 1999 | if (trans) { |
8445f61c | 2000 | btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); |
3a45bb20 | 2001 | btrfs_end_transaction(trans); |
9e351cc8 JB |
2002 | } else { |
2003 | up_read(&fs_info->commit_root_sem); | |
7a3ae2f8 JS |
2004 | } |
2005 | ||
a542ad1b JS |
2006 | return ret; |
2007 | } | |
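/*
 * Illustrative sketch, not part of this file: a minimal
 * iterate_extent_inodes_t callback and its invocation, in the style of the
 * logical-to-ino ioctl. Both names are hypothetical.
 */
struct ref_count_ctx {
	u64 refs;
};

static int record_inode_ref(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct ref_count_ctx *rc = ctx;

	rc->refs++;
	return 0; /* returning non-zero would stop the iteration */
}

static int count_extent_inode_refs(struct btrfs_fs_info *fs_info,
				   u64 extent_item_objectid,
				   u64 extent_item_pos, u64 *refs)
{
	struct ref_count_ctx rc = { 0 };
	int ret;

	ret = iterate_extent_inodes(fs_info, extent_item_objectid,
				    extent_item_pos, 0 /* live root */,
				    record_inode_ref, &rc, false);
	if (ret < 0)
		return ret;
	*refs = rc.refs;
	return 0;
}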
2008 | ||
2009 | int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, | |
2010 | struct btrfs_path *path, | |
c995ab3c ZB |
2011 | iterate_extent_inodes_t *iterate, void *ctx, |
2012 | bool ignore_offset) | |
a542ad1b JS |
2013 | { |
2014 | int ret; | |
4692cf58 | 2015 | u64 extent_item_pos; |
69917e43 | 2016 | u64 flags = 0; |
a542ad1b | 2017 | struct btrfs_key found_key; |
7a3ae2f8 | 2018 | int search_commit_root = path->search_commit_root; |
a542ad1b | 2019 | |
69917e43 | 2020 | ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); |
4692cf58 | 2021 | btrfs_release_path(path); |
a542ad1b JS |
2022 | if (ret < 0) |
2023 | return ret; | |
69917e43 | 2024 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) |
3627bf45 | 2025 | return -EINVAL; |
a542ad1b | 2026 | |
4692cf58 | 2027 | extent_item_pos = logical - found_key.objectid; |
7a3ae2f8 JS |
2028 | ret = iterate_extent_inodes(fs_info, found_key.objectid, |
2029 | extent_item_pos, search_commit_root, | |
c995ab3c | 2030 | iterate, ctx, ignore_offset); |
a542ad1b JS |
2031 | |
2032 | return ret; | |
2033 | } | |
2034 | ||
d24bec3a MF |
2035 | typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off, |
2036 | struct extent_buffer *eb, void *ctx); | |
2037 | ||
2038 | static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root, | |
2039 | struct btrfs_path *path, | |
2040 | iterate_irefs_t *iterate, void *ctx) | |
a542ad1b | 2041 | { |
aefc1eb1 | 2042 | int ret = 0; |
a542ad1b JS |
2043 | int slot; |
2044 | u32 cur; | |
2045 | u32 len; | |
2046 | u32 name_len; | |
2047 | u64 parent = 0; | |
2048 | int found = 0; | |
2049 | struct extent_buffer *eb; | |
2050 | struct btrfs_item *item; | |
2051 | struct btrfs_inode_ref *iref; | |
2052 | struct btrfs_key found_key; | |
2053 | ||
aefc1eb1 | 2054 | while (!ret) { |
c234a24d DS |
2055 | ret = btrfs_find_item(fs_root, path, inum, |
2056 | parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY, | |
2057 | &found_key); | |
2058 | ||
a542ad1b JS |
2059 | if (ret < 0) |
2060 | break; | |
2061 | if (ret) { | |
2062 | ret = found ? 0 : -ENOENT; | |
2063 | break; | |
2064 | } | |
2065 | ++found; | |
2066 | ||
2067 | parent = found_key.offset; | |
2068 | slot = path->slots[0]; | |
3fe81ce2 FDBM |
2069 | eb = btrfs_clone_extent_buffer(path->nodes[0]); |
2070 | if (!eb) { | |
2071 | ret = -ENOMEM; | |
2072 | break; | |
2073 | } | |
a542ad1b JS |
2074 | btrfs_release_path(path); |
2075 | ||
dd3cc16b | 2076 | item = btrfs_item_nr(slot); |
a542ad1b JS |
2077 | iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); |
2078 | ||
2079 | for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) { | |
2080 | name_len = btrfs_inode_ref_name_len(eb, iref); | |
2081 | /* path must be released before calling iterate()! */ | |
ab8d0fc4 JM |
2082 | btrfs_debug(fs_root->fs_info, |
2083 | "following ref at offset %u for inode %llu in tree %llu", | |
4fd786e6 MT |
2084 | cur, found_key.objectid, |
2085 | fs_root->root_key.objectid); | |
d24bec3a MF |
2086 | ret = iterate(parent, name_len, |
2087 | (unsigned long)(iref + 1), eb, ctx); | |
aefc1eb1 | 2088 | if (ret) |
a542ad1b | 2089 | break; |
a542ad1b JS |
2090 | len = sizeof(*iref) + name_len; |
2091 | iref = (struct btrfs_inode_ref *)((char *)iref + len); | |
2092 | } | |
2093 | free_extent_buffer(eb); | |
2094 | } | |
2095 | ||
2096 | btrfs_release_path(path); | |
2097 | ||
2098 | return ret; | |
2099 | } | |
2100 | ||
d24bec3a MF |
2101 | static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, |
2102 | struct btrfs_path *path, | |
2103 | iterate_irefs_t *iterate, void *ctx) | |
2104 | { | |
2105 | int ret; | |
2106 | int slot; | |
2107 | u64 offset = 0; | |
2108 | u64 parent; | |
2109 | int found = 0; | |
2110 | struct extent_buffer *eb; | |
2111 | struct btrfs_inode_extref *extref; | |
d24bec3a MF |
2112 | u32 item_size; |
2113 | u32 cur_offset; | |
2114 | unsigned long ptr; | |
2115 | ||
2116 | while (1) { | |
2117 | ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, | |
2118 | &offset); | |
2119 | if (ret < 0) | |
2120 | break; | |
2121 | if (ret) { | |
2122 | ret = found ? 0 : -ENOENT; | |
2123 | break; | |
2124 | } | |
2125 | ++found; | |
2126 | ||
2127 | slot = path->slots[0]; | |
3fe81ce2 FDBM |
2128 | eb = btrfs_clone_extent_buffer(path->nodes[0]); |
2129 | if (!eb) { | |
2130 | ret = -ENOMEM; | |
2131 | break; | |
2132 | } | |
d24bec3a MF |
2133 | btrfs_release_path(path); |
2134 | ||
2849a854 CM |
2135 | item_size = btrfs_item_size_nr(eb, slot); |
2136 | ptr = btrfs_item_ptr_offset(eb, slot); | |
d24bec3a MF |
2137 | cur_offset = 0; |
2138 | ||
2139 | while (cur_offset < item_size) { | |
2140 | u32 name_len; | |
2141 | ||
2142 | extref = (struct btrfs_inode_extref *)(ptr + cur_offset); | |
2143 | parent = btrfs_inode_extref_parent(eb, extref); | |
2144 | name_len = btrfs_inode_extref_name_len(eb, extref); | |
2145 | ret = iterate(parent, name_len, | |
2146 | (unsigned long)&extref->name, eb, ctx); | |
2147 | if (ret) | |
2148 | break; | |
2149 | ||
2849a854 | 2150 | cur_offset += btrfs_inode_extref_name_len(eb, extref); |
d24bec3a MF |
2151 | cur_offset += sizeof(*extref); |
2152 | } | |
d24bec3a MF |
2153 | free_extent_buffer(eb); |
2154 | ||
2155 | offset++; | |
2156 | } | |
2157 | ||
2158 | btrfs_release_path(path); | |
2159 | ||
2160 | return ret; | |
2161 | } | |
2162 | ||
2163 | static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, | |
2164 | struct btrfs_path *path, iterate_irefs_t *iterate, | |
2165 | void *ctx) | |
2166 | { | |
2167 | int ret; | |
2168 | int found_refs = 0; | |
2169 | ||
2170 | ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx); | |
2171 | if (!ret) | |
2172 | ++found_refs; | |
2173 | else if (ret != -ENOENT) | |
2174 | return ret; | |
2175 | ||
2176 | ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx); | |
2177 | if (ret == -ENOENT && found_refs) | |
2178 | return 0; | |
2179 | ||
2180 | return ret; | |
2181 | } | |
2182 | ||
a542ad1b JS |
2183 | /* |
2184 | * returns 0 if the path could be dumped (possibly truncated) | |
2185 | * returns <0 in case of an error | |
2186 | */ | |
d24bec3a MF |
2187 | static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, |
2188 | struct extent_buffer *eb, void *ctx) | |
a542ad1b JS |
2189 | { |
2190 | struct inode_fs_paths *ipath = ctx; | |
2191 | char *fspath; | |
2192 | char *fspath_min; | |
2193 | int i = ipath->fspath->elem_cnt; | |
2194 | const int s_ptr = sizeof(char *); | |
2195 | u32 bytes_left; | |
2196 | ||
2197 | bytes_left = ipath->fspath->bytes_left > s_ptr ? | |
2198 | ipath->fspath->bytes_left - s_ptr : 0; | |
2199 | ||
740c3d22 | 2200 | fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; |
96b5bd77 JS |
2201 | fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, |
2202 | name_off, eb, inum, fspath_min, bytes_left); | |
a542ad1b JS |
2203 | if (IS_ERR(fspath)) |
2204 | return PTR_ERR(fspath); | |
2205 | ||
2206 | if (fspath > fspath_min) { | |
745c4d8e | 2207 | ipath->fspath->val[i] = (u64)(unsigned long)fspath; |
a542ad1b JS |
2208 | ++ipath->fspath->elem_cnt; |
2209 | ipath->fspath->bytes_left = fspath - fspath_min; | |
2210 | } else { | |
2211 | ++ipath->fspath->elem_missed; | |
2212 | ipath->fspath->bytes_missing += fspath_min - fspath; | |
2213 | ipath->fspath->bytes_left = 0; | |
2214 | } | |
2215 | ||
2216 | return 0; | |
2217 | } | |
2218 | ||
2219 | /* | |
2220 | * this dumps all file system paths to the inode into the ipath struct, provided | |
2221 | * it has been created large enough. each path is zero-terminated and accessed | |
740c3d22 | 2222 | * from ipath->fspath->val[i]. |
a542ad1b | 2223 | * when it returns, there are ipath->fspath->elem_cnt number of paths available |
740c3d22 | 2224 | * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the |
01327610 | 2225 | * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise, |
a542ad1b JS |
2226 | * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would |
2227 | * have been needed to return all paths. | |
2228 | */ | |
2229 | int paths_from_inode(u64 inum, struct inode_fs_paths *ipath) | |
2230 | { | |
2231 | return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path, | |
d24bec3a | 2232 | inode_to_path, ipath); |
a542ad1b JS |
2233 | } |
2234 | ||
a542ad1b JS |
2235 | struct btrfs_data_container *init_data_container(u32 total_bytes) |
2236 | { | |
2237 | struct btrfs_data_container *data; | |
2238 | size_t alloc_bytes; | |
2239 | ||
2240 | alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); | |
f54de068 | 2241 | data = kvmalloc(alloc_bytes, GFP_KERNEL); |
a542ad1b JS |
2242 | if (!data) |
2243 | return ERR_PTR(-ENOMEM); | |
2244 | ||
2245 | if (total_bytes >= sizeof(*data)) { | |
2246 | data->bytes_left = total_bytes - sizeof(*data); | |
2247 | data->bytes_missing = 0; | |
2248 | } else { | |
2249 | data->bytes_missing = sizeof(*data) - total_bytes; | |
2250 | data->bytes_left = 0; | |
2251 | } | |
2252 | ||
2253 | data->elem_cnt = 0; | |
2254 | data->elem_missed = 0; | |
2255 | ||
2256 | return data; | |
2257 | } | |
2258 | ||
2259 | /* | |
2260 | * allocates space to return multiple file system paths for an inode. | |
2261 | * total_bytes to allocate are passed, note that space usable for actual path | |
2262 | * information will be total_bytes - sizeof(struct inode_fs_paths). | |
2263 | * the returned pointer must be freed with free_ipath() in the end. | |
2264 | */ | |
2265 | struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, | |
2266 | struct btrfs_path *path) | |
2267 | { | |
2268 | struct inode_fs_paths *ifp; | |
2269 | struct btrfs_data_container *fspath; | |
2270 | ||
2271 | fspath = init_data_container(total_bytes); | |
2272 | if (IS_ERR(fspath)) | |
afc6961f | 2273 | return ERR_CAST(fspath); |
a542ad1b | 2274 | |
f54de068 | 2275 | ifp = kmalloc(sizeof(*ifp), GFP_KERNEL); |
a542ad1b | 2276 | if (!ifp) { |
f54de068 | 2277 | kvfree(fspath); |
a542ad1b JS |
2278 | return ERR_PTR(-ENOMEM); |
2279 | } | |
2280 | ||
2281 | ifp->btrfs_path = path; | |
2282 | ifp->fspath = fspath; | |
2283 | ifp->fs_root = fs_root; | |
2284 | ||
2285 | return ifp; | |
2286 | } | |
2287 | ||
2288 | void free_ipath(struct inode_fs_paths *ipath) | |
2289 | { | |
4735fb28 JJ |
2290 | if (!ipath) |
2291 | return; | |
f54de068 | 2292 | kvfree(ipath->fspath); |
a542ad1b JS |
2293 | kfree(ipath); |
2294 | } | |
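/*
 * Illustrative sketch, not part of this file: the full init_ipath() /
 * paths_from_inode() / free_ipath() lifecycle. dump_paths_of_inode() is a
 * hypothetical name and 4096 an arbitrary example buffer size.
 */
static int dump_paths_of_inode(u64 inum, struct btrfs_root *fs_root)
{
	struct inode_fs_paths *ipath;
	struct btrfs_path *path;
	int ret;
	u32 i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret) {
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			btrfs_info(fs_root->fs_info, "path %u: %s", i,
				   (char *)(unsigned long)ipath->fspath->val[i]);
	}

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}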
a37f232b QW |
2295 | |
2296 | struct btrfs_backref_iter *btrfs_backref_iter_alloc( | |
2297 | struct btrfs_fs_info *fs_info, gfp_t gfp_flag) | |
2298 | { | |
2299 | struct btrfs_backref_iter *ret; | |
2300 | ||
2301 | ret = kzalloc(sizeof(*ret), gfp_flag); | |
2302 | if (!ret) | |
2303 | return NULL; | |
2304 | ||
2305 | ret->path = btrfs_alloc_path(); | |
c15c2ec0 | 2306 | if (!ret->path) { |
a37f232b QW |
2307 | kfree(ret); |
2308 | return NULL; | |
2309 | } | |
2310 | ||
2311 | /* Current backref iterator only supports iteration in commit root */ | |
2312 | ret->path->search_commit_root = 1; | |
2313 | ret->path->skip_locking = 1; | |
2314 | ret->fs_info = fs_info; | |
2315 | ||
2316 | return ret; | |
2317 | } | |
2318 | ||
2319 | int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr) | |
2320 | { | |
2321 | struct btrfs_fs_info *fs_info = iter->fs_info; | |
2322 | struct btrfs_path *path = iter->path; | |
2323 | struct btrfs_extent_item *ei; | |
2324 | struct btrfs_key key; | |
2325 | int ret; | |
2326 | ||
2327 | key.objectid = bytenr; | |
2328 | key.type = BTRFS_METADATA_ITEM_KEY; | |
2329 | key.offset = (u64)-1; | |
2330 | iter->bytenr = bytenr; | |
2331 | ||
2332 | ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0); | |
2333 | if (ret < 0) | |
2334 | return ret; | |
2335 | if (ret == 0) { | |
2336 | ret = -EUCLEAN; | |
2337 | goto release; | |
2338 | } | |
2339 | if (path->slots[0] == 0) { | |
2340 | WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); | |
2341 | ret = -EUCLEAN; | |
2342 | goto release; | |
2343 | } | |
2344 | path->slots[0]--; | |
2345 | ||
2346 | btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); | |
2347 | if ((key.type != BTRFS_EXTENT_ITEM_KEY && | |
2348 | key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) { | |
2349 | ret = -ENOENT; | |
2350 | goto release; | |
2351 | } | |
2352 | memcpy(&iter->cur_key, &key, sizeof(key)); | |
2353 | iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], | |
2354 | path->slots[0]); | |
2355 | iter->end_ptr = (u32)(iter->item_ptr + | |
2356 | btrfs_item_size_nr(path->nodes[0], path->slots[0])); | |
2357 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | |
2358 | struct btrfs_extent_item); | |
2359 | ||
2360 | /* | |
2361 | * Only iteration of tree backrefs is supported for now. | |
2362 | * | |
2363 | * This is an extra precaution for non skinny-metadata filesystems, where | |
2364 | * EXTENT_ITEM is also used for tree blocks, so we can only use the | |
2365 | * extent flags to determine if it's a tree block. | |
2366 | */ | |
2367 | if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) { | |
2368 | ret = -ENOTSUPP; | |
2369 | goto release; | |
2370 | } | |
2371 | iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei)); | |
2372 | ||
2373 | /* If there is no inline backref, go search for keyed backref */ | |
2374 | if (iter->cur_ptr >= iter->end_ptr) { | |
2375 | ret = btrfs_next_item(fs_info->extent_root, path); | |
2376 | ||
2377 | /* No inline nor keyed ref */ | |
2378 | if (ret > 0) { | |
2379 | ret = -ENOENT; | |
2380 | goto release; | |
2381 | } | |
2382 | if (ret < 0) | |
2383 | goto release; | |
2384 | ||
2385 | btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, | |
2386 | path->slots[0]); | |
2387 | if (iter->cur_key.objectid != bytenr || | |
2388 | (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY && | |
2389 | iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) { | |
2390 | ret = -ENOENT; | |
2391 | goto release; | |
2392 | } | |
2393 | iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], | |
2394 | path->slots[0]); | |
2395 | iter->item_ptr = iter->cur_ptr; | |
2396 | iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr( | |
2397 | path->nodes[0], path->slots[0])); | |
2398 | } | |
2399 | ||
2400 | return 0; | |
2401 | release: | |
2402 | btrfs_backref_iter_release(iter); | |
2403 | return ret; | |
2404 | } | |
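/*
 * Illustrative sketch, not part of this file: the intended loop around
 * btrfs_backref_iter_start()/_next(), visiting every inline and keyed tree
 * backref of one metadata extent. list_tree_backrefs() is a hypothetical
 * name; the iter helpers referenced here live in backref.h.
 */
static int list_tree_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	for (ret = btrfs_backref_iter_start(iter, bytenr);
	     ret == 0; ret = btrfs_backref_iter_next(iter)) {
		if (btrfs_backref_iter_is_inline_ref(iter))
			btrfs_info(fs_info, "inline backref at item offset %u",
				   iter->cur_ptr - iter->item_ptr);
		else
			btrfs_info(fs_info, "keyed backref (%u %llu)",
				   iter->cur_key.type, iter->cur_key.offset);
	}
	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret < 0 ? ret : 0;
}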
c39c2ddc QW |
2405 | |
2406 | /* | |
2407 | * Go to the next backref item of the current bytenr, which can be either | |
2408 | * inlined or keyed. | |
2409 | * | |
2410 | * Caller needs to check whether it's inline ref or not by iter->cur_key. | |
2411 | * | |
2412 | * Return 0 if we get next backref without problem. | |
2413 | * Return >0 if there is no extra backref for this bytenr. | |
2414 | * Return <0 if something went wrong. | |
2415 | */ | |
2416 | int btrfs_backref_iter_next(struct btrfs_backref_iter *iter) | |
2417 | { | |
2418 | struct extent_buffer *eb = btrfs_backref_get_eb(iter); | |
2419 | struct btrfs_path *path = iter->path; | |
2420 | struct btrfs_extent_inline_ref *iref; | |
2421 | int ret; | |
2422 | u32 size; | |
2423 | ||
2424 | if (btrfs_backref_iter_is_inline_ref(iter)) { | |
2425 | /* We're still inside the inline refs */ | |
2426 | ASSERT(iter->cur_ptr < iter->end_ptr); | |
2427 | ||
2428 | if (btrfs_backref_has_tree_block_info(iter)) { | |
2429 | /* First tree block info */ | |
2430 | size = sizeof(struct btrfs_tree_block_info); | |
2431 | } else { | |
2432 | /* Use inline ref type to determine the size */ | |
2433 | int type; | |
2434 | ||
2435 | iref = (struct btrfs_extent_inline_ref *) | |
2436 | ((unsigned long)iter->cur_ptr); | |
2437 | type = btrfs_extent_inline_ref_type(eb, iref); | |
2438 | ||
2439 | size = btrfs_extent_inline_ref_size(type); | |
2440 | } | |
2441 | iter->cur_ptr += size; | |
2442 | if (iter->cur_ptr < iter->end_ptr) | |
2443 | return 0; | |
2444 | ||
2445 | /* All inline items iterated, fall through */ | |
2446 | } | |
2447 | ||
2448 | /* We're at keyed items; no more inline items, go to the next one */ | |
2449 | ret = btrfs_next_item(iter->fs_info->extent_root, iter->path); | |
2450 | if (ret) | |
2451 | return ret; | |
2452 | ||
2453 | btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]); | |
2454 | if (iter->cur_key.objectid != iter->bytenr || | |
2455 | (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY && | |
2456 | iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY)) | |
2457 | return 1; | |
2458 | iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], | |
2459 | path->slots[0]); | |
2460 | iter->cur_ptr = iter->item_ptr; | |
2461 | iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0], | |
2462 | path->slots[0]); | |
2463 | return 0; | |
2464 | } | |
584fb121 QW |
2465 | |
2466 | void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info, | |
2467 | struct btrfs_backref_cache *cache, int is_reloc) | |
2468 | { | |
2469 | int i; | |
2470 | ||
2471 | cache->rb_root = RB_ROOT; | |
2472 | for (i = 0; i < BTRFS_MAX_LEVEL; i++) | |
2473 | INIT_LIST_HEAD(&cache->pending[i]); | |
2474 | INIT_LIST_HEAD(&cache->changed); | |
2475 | INIT_LIST_HEAD(&cache->detached); | |
2476 | INIT_LIST_HEAD(&cache->leaves); | |
2477 | INIT_LIST_HEAD(&cache->pending_edge); | |
2478 | INIT_LIST_HEAD(&cache->useless_node); | |
2479 | cache->fs_info = fs_info; | |
2480 | cache->is_reloc = is_reloc; | |
2481 | } | |
b1818dab QW |
2482 | |
2483 | struct btrfs_backref_node *btrfs_backref_alloc_node( | |
2484 | struct btrfs_backref_cache *cache, u64 bytenr, int level) | |
2485 | { | |
2486 | struct btrfs_backref_node *node; | |
2487 | ||
2488 | ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL); | |
2489 | node = kzalloc(sizeof(*node), GFP_NOFS); | |
2490 | if (!node) | |
2491 | return node; | |
2492 | ||
2493 | INIT_LIST_HEAD(&node->list); | |
2494 | INIT_LIST_HEAD(&node->upper); | |
2495 | INIT_LIST_HEAD(&node->lower); | |
2496 | RB_CLEAR_NODE(&node->rb_node); | |
2497 | cache->nr_nodes++; | |
2498 | node->level = level; | |
2499 | node->bytenr = bytenr; | |
2500 | ||
2501 | return node; | |
2502 | } | |
47254d07 QW |
2503 | |
2504 | struct btrfs_backref_edge *btrfs_backref_alloc_edge( | |
2505 | struct btrfs_backref_cache *cache) | |
2506 | { | |
2507 | struct btrfs_backref_edge *edge; | |
2508 | ||
2509 | edge = kzalloc(sizeof(*edge), GFP_NOFS); | |
2510 | if (edge) | |
2511 | cache->nr_edges++; | |
2512 | return edge; | |
2513 | } | |
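/*
 * Illustrative sketch, not part of this file: how a node and an edge pair
 * up, mirroring what the cache building code below does. link_parent() is a
 * hypothetical name; the LINK_* flags and the free helpers are assumed from
 * backref.h. Once allocated, both objects are owned by @cache and counted
 * in nr_nodes/nr_edges until freed.
 */
static int link_parent(struct btrfs_backref_cache *cache,
		       struct btrfs_backref_node *lower, u64 parent_bytenr)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	upper = btrfs_backref_alloc_node(cache, parent_bytenr,
					 lower->level + 1);
	if (!upper)
		return -ENOMEM;

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge) {
		btrfs_backref_free_node(cache, upper);
		return -ENOMEM;
	}

	/* Hook the edge into both per-node lists at once */
	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER | LINK_UPPER);
	return 0;
}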
023acb07 QW |
2514 | |
2515 | /* | |
2516 | * Drop the backref node from cache, also cleaning up all its | |
2517 | * upper edges and any uncached nodes in the path. | |
2518 | * | |
2519 | * This cleanup happens bottom up, thus the node should either | |
2520 | * be the lowest node in the cache or a detached node. | |
2521 | */ | |
2522 | void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache, | |
2523 | struct btrfs_backref_node *node) | |
2524 | { | |
2525 | struct btrfs_backref_node *upper; | |
2526 | struct btrfs_backref_edge *edge; | |
2527 | ||
2528 | if (!node) | |
2529 | return; | |
2530 | ||
2531 | BUG_ON(!node->lowest && !node->detached); | |
2532 | while (!list_empty(&node->upper)) { | |
2533 | edge = list_entry(node->upper.next, struct btrfs_backref_edge, | |
2534 | list[LOWER]); | |
2535 | upper = edge->node[UPPER]; | |
2536 | list_del(&edge->list[LOWER]); | |
2537 | list_del(&edge->list[UPPER]); | |
2538 | btrfs_backref_free_edge(cache, edge); | |
2539 | ||
2540 | if (RB_EMPTY_NODE(&upper->rb_node)) { | |
2541 | BUG_ON(!list_empty(&node->upper)); | |
2542 | btrfs_backref_drop_node(cache, node); | |
2543 | node = upper; | |
2544 | node->lowest = 1; | |
2545 | continue; | |
2546 | } | |
2547 | /* | |
2548 | * Add the node to the leaf node list if no other child block | |
2549 | * is cached. | |
2550 | */ | |
2551 | if (list_empty(&upper->lower)) { | |
2552 | list_add_tail(&upper->lower, &cache->leaves); | |
2553 | upper->lowest = 1; | |
2554 | } | |
2555 | } | |
2556 | ||
2557 | btrfs_backref_drop_node(cache, node); | |
2558 | } | |
13fe1bdb QW |
2559 | |
2560 | /* | |
2561 | * Release all nodes/edges from current cache | |
2562 | */ | |
2563 | void btrfs_backref_release_cache(struct btrfs_backref_cache *cache) | |
2564 | { | |
2565 | struct btrfs_backref_node *node; | |
2566 | int i; | |
2567 | ||
2568 | while (!list_empty(&cache->detached)) { | |
2569 | node = list_entry(cache->detached.next, | |
2570 | struct btrfs_backref_node, list); | |
2571 | btrfs_backref_cleanup_node(cache, node); | |
2572 | } | |
2573 | ||
2574 | while (!list_empty(&cache->leaves)) { | |
2575 | node = list_entry(cache->leaves.next, | |
2576 | struct btrfs_backref_node, lower); | |
2577 | btrfs_backref_cleanup_node(cache, node); | |
2578 | } | |
2579 | ||
2580 | cache->last_trans = 0; | |
2581 | ||
2582 | for (i = 0; i < BTRFS_MAX_LEVEL; i++) | |
2583 | ASSERT(list_empty(&cache->pending[i])); | |
2584 | ASSERT(list_empty(&cache->pending_edge)); | |
2585 | ASSERT(list_empty(&cache->useless_node)); | |
2586 | ASSERT(list_empty(&cache->changed)); | |
2587 | ASSERT(list_empty(&cache->detached)); | |
2588 | ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); | |
2589 | ASSERT(!cache->nr_nodes); | |
2590 | ASSERT(!cache->nr_edges); | |
2591 | } | |
1b60d2ec QW |
2592 | |
2593 | /* | |
2594 | * Handle direct tree backref | |
2595 | * | |
2596 | * Direct tree backref means the backref item records its parent bytenr | |
2597 | * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined). | |
2598 | * | |
2599 | * @ref_key: The converted backref key. | |
2600 | * For keyed backref, it's the item key. | |
2601 | * For inlined backref, objectid is the bytenr, | |
2602 | * type is btrfs_inline_ref_type, offset is | |
2603 | * btrfs_inline_ref_offset. | |
2604 | */ | |
2605 | static int handle_direct_tree_backref(struct btrfs_backref_cache *cache, | |
2606 | struct btrfs_key *ref_key, | |
2607 | struct btrfs_backref_node *cur) | |
2608 | { | |
2609 | struct btrfs_backref_edge *edge; | |
2610 | struct btrfs_backref_node *upper; | |
2611 | struct rb_node *rb_node; | |
2612 | ||
2613 | ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); | |
2614 | ||
2615 | /* Only reloc root uses backref pointing to itself */ | |
2616 | if (ref_key->objectid == ref_key->offset) { | |
2617 | struct btrfs_root *root; | |
2618 | ||
2619 | cur->is_reloc_root = 1; | |
2620 | /* Only reloc backref cache cares about a specific root */ | |
2621 | if (cache->is_reloc) { | |
2622 | root = find_reloc_root(cache->fs_info, cur->bytenr); | |
2623 | if (WARN_ON(!root)) | |
2624 | return -ENOENT; | |
2625 | cur->root = root; | |
2626 | } else { | |
2627 | /* | |
2628 | * For generic purpose backref cache, reloc root node | |
2629 | * is useless. | |
2630 | */ | |
2631 | list_add(&cur->list, &cache->useless_node); | |
2632 | } | |
2633 | return 0; | |
2634 | } | |
2635 | ||
2636 | edge = btrfs_backref_alloc_edge(cache); | |
2637 | if (!edge) | |
2638 | return -ENOMEM; | |
2639 | ||
2640 | rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); | |
2641 | if (!rb_node) { | |
2642 | /* Parent node not yet cached */ | |
2643 | upper = btrfs_backref_alloc_node(cache, ref_key->offset, | |
2644 | cur->level + 1); | |
2645 | if (!upper) { | |
2646 | btrfs_backref_free_edge(cache, edge); | |
2647 | return -ENOMEM; | |
2648 | } | |
2649 | ||
2650 | /* | |
2651 | * Backrefs for the upper level block aren't cached, add the | |
2652 | * block to pending list | |
2653 | */ | |
2654 | list_add_tail(&edge->list[UPPER], &cache->pending_edge); | |
2655 | } else { | |
2656 | /* Parent node already cached */ | |
2657 | upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node); | |
2658 | ASSERT(upper->checked); | |
2659 | INIT_LIST_HEAD(&edge->list[UPPER]); | |
2660 | } | |
2661 | btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); | |
2662 | return 0; | |
2663 | } | |
2664 | ||
2665 | /* | |
2666 | * Handle indirect tree backref | |
2667 | * | |
2668 | * Indirect tree backref means we only know which tree the node belongs to. | |
2669 | * We still need to do a tree search to find out its parents. This is for | |
2670 | * TREE_BLOCK_REF backref (keyed or inlined). | |
2671 | * | |
2672 | * @ref_key: The same as @ref_key in handle_direct_tree_backref() | |
2673 | * @tree_key: The first key of this tree block. | |
2674 | * @path: A clean (released) path, to avoid allocating a path every time | |
2675 | * the function gets called. | |
2676 | */ | |
2677 | static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache, | |
2678 | struct btrfs_path *path, | |
2679 | struct btrfs_key *ref_key, | |
2680 | struct btrfs_key *tree_key, | |
2681 | struct btrfs_backref_node *cur) | |
2682 | { | |
2683 | struct btrfs_fs_info *fs_info = cache->fs_info; | |
2684 | struct btrfs_backref_node *upper; | |
2685 | struct btrfs_backref_node *lower; | |
2686 | struct btrfs_backref_edge *edge; | |
2687 | struct extent_buffer *eb; | |
2688 | struct btrfs_root *root; | |
1b60d2ec QW |
2689 | struct rb_node *rb_node; |
2690 | int level; | |
2691 | bool need_check = true; | |
2692 | int ret; | |
2693 | ||
56e9357a | 2694 | root = btrfs_get_fs_root(fs_info, ref_key->offset, false); |
1b60d2ec QW |
2695 | if (IS_ERR(root)) |
2696 | return PTR_ERR(root); | |
92a7cc42 | 2697 | if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) |
1b60d2ec QW |
2698 | cur->cowonly = 1; |
2699 | ||
2700 | if (btrfs_root_level(&root->root_item) == cur->level) { | |
2701 | /* Tree root */ | |
2702 | ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); | |
876de781 QW |
2703 | /* |
2704 | * For reloc backref cache, we may ignore reloc root. But for | |
2705 | * general purpose backref cache, we can't rely on | |
2706 | * btrfs_should_ignore_reloc_root() as it may conflict with | |
2707 | * current running relocation and lead to missing root. | |
2708 | * | |
2709 | * For general purpose backref cache, reloc root detection is | |
2710 | * completely relying on direct backref (key->offset is parent | |
2711 | * bytenr), thus only do such check for reloc cache. | |
2712 | */ | |
2713 | if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { | |
1b60d2ec QW |
2714 | btrfs_put_root(root); |
2715 | list_add(&cur->list, &cache->useless_node); | |
2716 | } else { | |
2717 | cur->root = root; | |
2718 | } | |
2719 | return 0; | |
2720 | } | |
2721 | ||
2722 | level = cur->level + 1; | |
2723 | ||
2724 | /* Search the tree to find parent blocks referring to the block */ | |
2725 | path->search_commit_root = 1; | |
2726 | path->skip_locking = 1; | |
2727 | path->lowest_level = level; | |
2728 | ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0); | |
2729 | path->lowest_level = 0; | |
2730 | if (ret < 0) { | |
2731 | btrfs_put_root(root); | |
2732 | return ret; | |
2733 | } | |
2734 | if (ret > 0 && path->slots[level] > 0) | |
2735 | path->slots[level]--; | |
2736 | ||
2737 | eb = path->nodes[level]; | |
2738 | if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { | |
2739 | btrfs_err(fs_info, | |
2740 | "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", | |
2741 | cur->bytenr, level - 1, root->root_key.objectid, | |
2742 | tree_key->objectid, tree_key->type, tree_key->offset); | |
2743 | btrfs_put_root(root); | |
2744 | ret = -ENOENT; | |
2745 | goto out; | |
2746 | } | |
2747 | lower = cur; | |
2748 | ||
2749 | /* Add all nodes and edges in the path */ | |
2750 | for (; level < BTRFS_MAX_LEVEL; level++) { | |
2751 | if (!path->nodes[level]) { | |
2752 | ASSERT(btrfs_root_bytenr(&root->root_item) == | |
2753 | lower->bytenr); | |
876de781 QW |
2754 | /* Same as previous should_ignore_reloc_root() call */ |
2755 | if (btrfs_should_ignore_reloc_root(root) && | |
2756 | cache->is_reloc) { | |
1b60d2ec QW |
2757 | btrfs_put_root(root); |
2758 | list_add(&lower->list, &cache->useless_node); | |
2759 | } else { | |
2760 | lower->root = root; | |
2761 | } | |
2762 | break; | |
2763 | } | |
2764 | ||
2765 | edge = btrfs_backref_alloc_edge(cache); | |
2766 | if (!edge) { | |
2767 | btrfs_put_root(root); | |
2768 | ret = -ENOMEM; | |
2769 | goto out; | |
2770 | } | |
2771 | ||
2772 | eb = path->nodes[level]; | |
2773 | rb_node = rb_simple_search(&cache->rb_root, eb->start); | |
2774 | if (!rb_node) { | |
2775 | upper = btrfs_backref_alloc_node(cache, eb->start, | |
2776 | lower->level + 1); | |
2777 | if (!upper) { | |
2778 | btrfs_put_root(root); | |
2779 | btrfs_backref_free_edge(cache, edge); | |
2780 | ret = -ENOMEM; | |
2781 | goto out; | |
2782 | } | |
2783 | upper->owner = btrfs_header_owner(eb); | |
92a7cc42 | 2784 | if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) |
1b60d2ec QW |
2785 | upper->cowonly = 1; |
2786 | ||
2787 | /* | |
2788 | * If we know the block isn't shared we can avoid | |
2789 | * checking its backrefs. | |
2790 | */ | |
2791 | if (btrfs_block_can_be_shared(root, eb)) | |
2792 | upper->checked = 0; | |
2793 | else | |
2794 | upper->checked = 1; | |
2795 | ||
2796 | /* | |
2797 | * Add the block to pending list if we need to check its | |
2798 | * backrefs, we only do this once while walking up a | |
2799 | * tree as we will catch anything else later on. | |
2800 | */ | |
2801 | if (!upper->checked && need_check) { | |
2802 | need_check = false; | |
2803 | list_add_tail(&edge->list[UPPER], | |
2804 | &cache->pending_edge); | |
2805 | } else { | |
2806 | if (upper->checked) | |
2807 | need_check = true; | |
2808 | INIT_LIST_HEAD(&edge->list[UPPER]); | |
2809 | } | |
2810 | } else { | |
2811 | upper = rb_entry(rb_node, struct btrfs_backref_node, | |
2812 | rb_node); | |
2813 | ASSERT(upper->checked); | |
2814 | INIT_LIST_HEAD(&edge->list[UPPER]); | |
2815 | if (!upper->owner) | |
2816 | upper->owner = btrfs_header_owner(eb); | |
2817 | } | |
2818 | btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); | |
2819 | ||
2820 | if (rb_node) { | |
2821 | btrfs_put_root(root); | |
2822 | break; | |
2823 | } | |
2824 | lower = upper; | |
2825 | upper = NULL; | |
2826 | } | |
2827 | out: | |
2828 | btrfs_release_path(path); | |
2829 | return ret; | |
2830 | } | |
2831 | ||
2832 | /* | |
2833 | * Add backref node @cur into @cache. | |
2834 | * | |
2835 | * NOTE: Even if the function returns 0, @cur is not yet cached, as its upper | |
2836 | * links aren't yet bi-directional. | |
fc997ed0 | 2837 | * Use btrfs_backref_finish_upper_links() to finish such linkage. |
1b60d2ec QW |
2838 | * |
2839 | * @path: Released path for indirect tree backref lookup | |
2840 | * @iter: Released backref iter for extent tree search | |
2841 | * @node_key: The first key of the tree block | |
2842 | */ | |
2843 | int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache, | |
2844 | struct btrfs_path *path, | |
2845 | struct btrfs_backref_iter *iter, | |
2846 | struct btrfs_key *node_key, | |
2847 | struct btrfs_backref_node *cur) | |
2848 | { | |
2849 | struct btrfs_fs_info *fs_info = cache->fs_info; | |
2850 | struct btrfs_backref_edge *edge; | |
2851 | struct btrfs_backref_node *exist; | |
2852 | int ret; | |
2853 | ||
2854 | ret = btrfs_backref_iter_start(iter, cur->bytenr); | |
2855 | if (ret < 0) | |
2856 | return ret; | |
2857 | /* | |
2858 | * We skip the first btrfs_tree_block_info, as we don't use the key | |
2859 | * stored in it, but fetch it from the tree block | |
2860 | */ | |
2861 | if (btrfs_backref_has_tree_block_info(iter)) { | |
2862 | ret = btrfs_backref_iter_next(iter); | |
2863 | if (ret < 0) | |
2864 | goto out; | |
2865 | /* No extra backref? This means the tree block is corrupted */ | |
2866 | if (ret > 0) { | |
2867 | ret = -EUCLEAN; | |
2868 | goto out; | |
2869 | } | |
2870 | } | |
2871 | WARN_ON(cur->checked); | |
2872 | if (!list_empty(&cur->upper)) { | |
2873 | /* | |
2874 | * The backref was added previously when processing backref of | |
2875 | * type BTRFS_TREE_BLOCK_REF_KEY | |
2876 | */ | |
2877 | ASSERT(list_is_singular(&cur->upper)); | |
2878 | edge = list_entry(cur->upper.next, struct btrfs_backref_edge, | |
2879 | list[LOWER]); | |
2880 | ASSERT(list_empty(&edge->list[UPPER])); | |
2881 | exist = edge->node[UPPER]; | |
2882 | /* | |
2883 | * Add the upper level block to the pending list if we need to check | |
2884 | * its backrefs | |
2885 | */ | |
2886 | if (!exist->checked) | |
2887 | list_add_tail(&edge->list[UPPER], &cache->pending_edge); | |
2888 | } else { | |
2889 | exist = NULL; | |
2890 | } | |
2891 | ||
2892 | for (; ret == 0; ret = btrfs_backref_iter_next(iter)) { | |
2893 | struct extent_buffer *eb; | |
2894 | struct btrfs_key key; | |
2895 | int type; | |
2896 | ||
2897 | cond_resched(); | |
2898 | eb = btrfs_backref_get_eb(iter); | |
2899 | ||
2900 | key.objectid = iter->bytenr; | |
2901 | if (btrfs_backref_iter_is_inline_ref(iter)) { | |
2902 | struct btrfs_extent_inline_ref *iref; | |
2903 | ||
2904 | /* Update key for inline backref */ | |
2905 | iref = (struct btrfs_extent_inline_ref *) | |
2906 | ((unsigned long)iter->cur_ptr); | |
2907 | type = btrfs_get_extent_inline_ref_type(eb, iref, | |
2908 | BTRFS_REF_TYPE_BLOCK); | |
2909 | if (type == BTRFS_REF_TYPE_INVALID) { | |
2910 | ret = -EUCLEAN; | |
2911 | goto out; | |
2912 | } | |
2913 | key.type = type; | |
2914 | key.offset = btrfs_extent_inline_ref_offset(eb, iref); | |
2915 | } else { | |
2916 | key.type = iter->cur_key.type; | |
2917 | key.offset = iter->cur_key.offset; | |
2918 | } | |
2919 | ||
2920 | /* | |
2921 | * Parent node found and matches current inline ref, no need to | |
2922 | * rebuild this node for this inline ref | |
2923 | */ | |
2924 | if (exist && | |
2925 | ((key.type == BTRFS_TREE_BLOCK_REF_KEY && | |
2926 | exist->owner == key.offset) || | |
2927 | (key.type == BTRFS_SHARED_BLOCK_REF_KEY && | |
2928 | exist->bytenr == key.offset))) { | |
2929 | exist = NULL; | |
2930 | continue; | |
2931 | } | |
2932 | ||
2933 | /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ | |
2934 | if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { | |
2935 | ret = handle_direct_tree_backref(cache, &key, cur); | |
2936 | if (ret < 0) | |
2937 | goto out; | |
2938 | continue; | |
2939 | } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { | |
2940 | ret = -EINVAL; | |
2941 | btrfs_print_v0_err(fs_info); | |
2942 | btrfs_handle_fs_error(fs_info, ret, NULL); | |
2943 | goto out; | |
2944 | } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { | |
2945 | continue; | |
2946 | } | |
2947 | ||
2948 | /* | |
2949 | * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref offset | |
2950 | * holds the root objectid. We need to search the tree to get | |
2951 | * its parent bytenr. | |
2952 | */ | |
2953 | ret = handle_indirect_tree_backref(cache, path, &key, node_key, | |
2954 | cur); | |
2955 | if (ret < 0) | |
2956 | goto out; | |
2957 | } | |
2958 | ret = 0; | |
2959 | cur->checked = 1; | |
2960 | WARN_ON(exist); | |
2961 | out: | |
2962 | btrfs_backref_iter_release(iter); | |
2963 | return ret; | |
2964 | } | |
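/*
 * Illustrative sketch, not part of this file: the two-phase pattern built
 * on top of btrfs_backref_add_tree_node(), as build_backref_tree() in
 * relocation.c uses it. build_one_node() is a hypothetical name; real
 * callers also drain cache->pending_edge between the phases, repeating
 * phase one for every still-unchecked parent, and unwind the cache on
 * error.
 */
static int build_one_node(struct btrfs_backref_cache *cache,
			  struct btrfs_path *path,
			  struct btrfs_backref_iter *iter,
			  struct btrfs_key *node_key,
			  struct btrfs_backref_node *cur)
{
	int ret;

	/* Phase one: collect this node's backrefs, queue unresolved parents */
	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
	if (ret < 0)
		return ret;

	/* Phase two: make the collected upper links bi-directional */
	return btrfs_backref_finish_upper_links(cache, cur);
}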
fc997ed0 QW |
2965 | |
2966 | /* | |
2967 | * Finish the upwards linkage created by btrfs_backref_add_tree_node() | |
2968 | */ | |
2969 | int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, | |
2970 | struct btrfs_backref_node *start) | |
2971 | { | |
2972 | struct list_head *useless_node = &cache->useless_node; | |
2973 | struct btrfs_backref_edge *edge; | |
2974 | struct rb_node *rb_node; | |
2975 | LIST_HEAD(pending_edge); | |
2976 | ||
2977 | ASSERT(start->checked); | |
2978 | ||
2979 | /* Insert this node into the cache if it's not COW-only */ | |
2980 | if (!start->cowonly) { | |
2981 | rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, | |
2982 | &start->rb_node); | |
2983 | if (rb_node) | |
2984 | btrfs_backref_panic(cache->fs_info, start->bytenr, | |
2985 | -EEXIST); | |
2986 | list_add_tail(&start->lower, &cache->leaves); | |
2987 | } | |
2988 | ||
2989 | /* | |
2990 | * Use breadth-first search to iterate over all related edges. | |
2991 | * | |
2992 | * The starting points are all the edges of this node. | |
2993 | */ | |
2994 | list_for_each_entry(edge, &start->upper, list[LOWER]) | |
2995 | list_add_tail(&edge->list[UPPER], &pending_edge); | |
2996 | ||
2997 | while (!list_empty(&pending_edge)) { | |
2998 | struct btrfs_backref_node *upper; | |
2999 | struct btrfs_backref_node *lower; | |
fc997ed0 QW |
3000 | |
3001 | edge = list_first_entry(&pending_edge, | |
3002 | struct btrfs_backref_edge, list[UPPER]); | |
3003 | list_del_init(&edge->list[UPPER]); | |
3004 | upper = edge->node[UPPER]; | |
3005 | lower = edge->node[LOWER]; | |
3006 | ||
3007 | /* Parent is detached, no need to keep any edges */ | |
3008 | if (upper->detached) { | |
3009 | list_del(&edge->list[LOWER]); | |
3010 | btrfs_backref_free_edge(cache, edge); | |
3011 | ||
3012 | /* Lower node is now an orphan, queue it for cleanup */ | |
3013 | if (list_empty(&lower->upper)) | |
3014 | list_add(&lower->list, useless_node); | |
3015 | continue; | |
3016 | } | |
3017 | ||
3018 | /* | |
3019 | * All new nodes added by the current build_backref_tree() call | |
3020 | * haven't been linked to the cache rb tree yet. | |
3021 | * So if upper->rb_node is already populated, this means a cache | |
3022 | * hit. We only need to link the edge, as @upper and all its | |
3023 | * parents have already been linked. | |
3024 | */ | |
3025 | if (!RB_EMPTY_NODE(&upper->rb_node)) { | |
3026 | if (upper->lowest) { | |
3027 | list_del_init(&upper->lower); | |
3028 | upper->lowest = 0; | |
3029 | } | |
3030 | ||
3031 | list_add_tail(&edge->list[UPPER], &upper->lower); | |
3032 | continue; | |
3033 | } | |
3034 | ||
3035 | /* Sanity check, we shouldn't have any unchecked nodes */ | |
3036 | if (!upper->checked) { | |
3037 | ASSERT(0); | |
3038 | return -EUCLEAN; | |
3039 | } | |
3040 | ||
3041 | /* Sanity check: COW-only status must match between node and parent */ | |
3042 | if (start->cowonly != upper->cowonly) { | |
3043 | ASSERT(0); | |
3044 | return -EUCLEAN; | |
3045 | } | |
3046 | ||
3047 | /* Only cache non-COW-only (i.e. subvolume tree) blocks */ | |
3048 | if (!upper->cowonly) { | |
3049 | rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, | |
3050 | &upper->rb_node); | |
3051 | if (rb_node) { | |
3052 | btrfs_backref_panic(cache->fs_info, | |
3053 | upper->bytenr, -EEXIST); | |
3054 | return -EUCLEAN; | |
3055 | } | |
3056 | } | |
3057 | ||
3058 | list_add_tail(&edge->list[UPPER], &upper->lower); | |
3059 | ||
3060 | /* | |
3061 | * Also queue all the parent edges of this uncached node | |
3062 | * to finish the upper linkage | |
3063 | */ | |
3064 | list_for_each_entry(edge, &upper->upper, list[LOWER]) | |
3065 | list_add_tail(&edge->list[UPPER], &pending_edge); | |
3066 | } | |
3067 | return 0; | |
3068 | } | |
1b23ea18 QW |
3069 | |
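
The loop above is a breadth-first walk over edges rather than nodes: draining the pending list links each edge under its upper node, and only a newly inserted (previously uncached) upper node contributes its own parent edges back to the queue, which is what keeps a cache hit from re-walking already-linked ancestors. A toy userspace sketch of that edge-queue pattern follows; the `toy_node`/`toy_edge` types and the fixed-size array queue are hypothetical simplifications of the kernel's intrusive lists.

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_EDGES 64

struct toy_edge;

struct toy_node {
	const char *name;
	bool cached;			/* already in the "rb tree" */
	struct toy_edge *parents[4];	/* edges to upper nodes */
	int nr_parents;
};

struct toy_edge {
	struct toy_node *lower;
	struct toy_node *upper;
};

/* Queue all parent edges of @node, mirroring list_add_tail() on pending_edge. */
static void queue_parents(struct toy_node *node, struct toy_edge **q, int *tail)
{
	for (int i = 0; i < node->nr_parents; i++)
		q[(*tail)++] = node->parents[i];
}

static void finish_upper_links(struct toy_node *start)
{
	struct toy_edge *queue[MAX_EDGES];
	int head = 0, tail = 0;

	/* The starting points are all the edges of the start node. */
	queue_parents(start, queue, &tail);

	while (head < tail) {
		struct toy_edge *edge = queue[head++];
		struct toy_node *upper = edge->upper;

		printf("link %s -> %s\n", edge->lower->name, upper->name);

		/* Cache hit: upper and all its parents are already linked. */
		if (upper->cached)
			continue;

		/* Newly inserted node: its parent edges still need work. */
		upper->cached = true;
		queue_parents(upper, queue, &tail);
	}
}

int main(void)
{
	struct toy_node root = { .name = "root" };
	struct toy_node mid  = { .name = "mid" };
	struct toy_node leaf = { .name = "leaf" };
	struct toy_edge e1 = { &leaf, &mid };
	struct toy_edge e2 = { &mid, &root };

	leaf.parents[leaf.nr_parents++] = &e1;
	mid.parents[mid.nr_parents++] = &e2;

	finish_upper_links(&leaf);
	return 0;
}
```

The edge-centric queue matters because a node can be reached through several lower blocks; queuing edges lets each linkage be committed exactly once without revisiting a parent that an earlier edge already inserted.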
3070 | void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache, | |
3071 | struct btrfs_backref_node *node) | |
3072 | { | |
3073 | struct btrfs_backref_node *lower; | |
3074 | struct btrfs_backref_node *upper; | |
3075 | struct btrfs_backref_edge *edge; | |
3076 | ||
3077 | while (!list_empty(&cache->useless_node)) { | |
3078 | lower = list_first_entry(&cache->useless_node, | |
3079 | struct btrfs_backref_node, list); | |
3080 | list_del_init(&lower->list); | |
3081 | } | |
3082 | while (!list_empty(&cache->pending_edge)) { | |
3083 | edge = list_first_entry(&cache->pending_edge, | |
3084 | struct btrfs_backref_edge, list[UPPER]); | |
3085 | list_del(&edge->list[UPPER]); | |
3086 | list_del(&edge->list[LOWER]); | |
3087 | lower = edge->node[LOWER]; | |
3088 | upper = edge->node[UPPER]; | |
3089 | btrfs_backref_free_edge(cache, edge); | |
3090 | ||
3091 | /* | |
3092 | * Lower is no longer linked to any upper backref nodes and | |
3093 | * isn't in the cache, so we can free it ourselves. | |
3094 | */ | |
3095 | if (list_empty(&lower->upper) && | |
3096 | RB_EMPTY_NODE(&lower->rb_node)) | |
3097 | list_add(&lower->list, &cache->useless_node); | |
3098 | ||
3099 | if (!RB_EMPTY_NODE(&upper->rb_node)) | |
3100 | continue; | |
3101 | ||
3102 | /* Add this node's upper edges to the pending list to process */ | |
3103 | list_for_each_entry(edge, &upper->upper, list[LOWER]) | |
3104 | list_add_tail(&edge->list[UPPER], | |
3105 | &cache->pending_edge); | |
3106 | if (list_empty(&upper->upper)) | |
3107 | list_add(&upper->list, &cache->useless_node); | |
3108 | } | |
3109 | ||
3110 | while (!list_empty(&cache->useless_node)) { | |
3111 | lower = list_first_entry(&cache->useless_node, | |
3112 | struct btrfs_backref_node, list); | |
3113 | list_del_init(&lower->list); | |
3114 | if (lower == node) | |
3115 | node = NULL; | |
3116 | btrfs_backref_free_node(cache, lower); | |
3117 | } | |
3118 | ||
3119 | btrfs_backref_cleanup_node(cache, node); | |
3120 | ASSERT(list_empty(&cache->useless_node) && | |
3121 | list_empty(&cache->pending_edge)); | |
3122 | } |
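
Taken together, the three helpers form a contract for callers such as build_backref_tree() in fs/btrfs/relocation.c: expand every block popped off cache->pending_edge with btrfs_backref_add_tree_node(), commit the finished graph with btrfs_backref_finish_upper_links(), and funnel any failure into btrfs_backref_error_cleanup() so no half-linked node or edge survives. The sketch below shows roughly how such a driver sequences them; it is a simplified illustration (locking, lookup of existing nodes, and most error handling elided), not the verbatim relocation code.

```c
/*
 * Simplified shape of the caller (cf. build_backref_tree() in
 * fs/btrfs/relocation.c), shown only to illustrate how the helpers in
 * this file are sequenced. Not verbatim kernel code.
 */
static struct btrfs_backref_node *build_tree_sketch(
		struct btrfs_backref_cache *cache,
		struct btrfs_backref_iter *iter,
		struct btrfs_path *path,
		struct btrfs_key *node_key,
		u64 bytenr, int level)
{
	struct btrfs_backref_node *node;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_edge *edge;
	int ret;

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node)
		return ERR_PTR(-ENOMEM);
	node->lowest = 1;
	cur = node;

	/* Stage 1: breadth-first expansion of every pending tree block. */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter,
						  node_key, cur);
		if (ret < 0)
			goto error;
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Stage 2: commit the finished sub-graph into the cache. */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;

	return node;

error:
	/* Stage 3: tear down everything that was partially linked. */
	btrfs_backref_error_cleanup(cache, node);
	return ERR_PTR(ret);
}
```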