// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"

/*
 * Used to keep track of the roots and number of refs each root has for a
 * given bytenr.  This just tracks the number of direct references, no shared
 * references.
 */
struct root_entry {
	u64 root_objectid;
	u64 num_refs;
	struct rb_node node;
};

/*
 * These are meant to represent what should exist in the extent tree, these can
 * be used to verify the extent tree is consistent as these should all match
 * what the extent tree says.
 */
struct ref_entry {
	u64 root_objectid;
	u64 parent;
	u64 owner;
	u64 offset;
	u64 num_refs;
	struct rb_node node;
};

#define MAX_TRACE	16

/*
 * Whenever we add/remove a reference we record the action.  The action maps
 * back to the delayed ref action.  We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root.  We also store
 * stack traces because that's how I roll.
 */
struct ref_action {
	int action;
	u64 root;
	struct ref_entry ref;
	struct list_head list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
};

/*
 * One of these for every block we reference, it holds the roots and references
 * to it as well as all of the ref actions that have occurred to it.  We never
 * free it until we unmount the file system in order to make sure re-allocations
 * are happening properly.
 */
struct block_entry {
	u64 bytenr;
	u64 len;
	u64 num_refs;
	int metadata;
	int from_disk;
	struct rb_root roots;
	struct rb_root refs;
	struct rb_node node;
	struct list_head actions;
};

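/*
 * Insert @be into the rbtree at @root, keyed by bytenr.  Returns the existing
 * entry if one for the same bytenr is already present (in which case @be is
 * not inserted), or NULL once @be has been linked into the tree.
 */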
static struct block_entry *insert_block_entry(struct rb_root *root,
					      struct block_entry *be)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct block_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct block_entry, node);
		if (entry->bytenr > be->bytenr)
			p = &(*p)->rb_left;
		else if (entry->bytenr < be->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&be->node, parent_node, p);
	rb_insert_color(&be->node, root);
	return NULL;
}

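/*
 * Find the block_entry that records @bytenr, or NULL if we have never seen
 * that block.
 */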
static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n;
	struct block_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < bytenr)
			n = n->rb_right;
		else if (entry->bytenr > bytenr)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

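/*
 * Insert @re into a block entry's tree of roots, keyed by root objectid.
 * Returns the existing root_entry if that root is already present, or NULL
 * once @re has been inserted.
 */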
static struct root_entry *insert_root_entry(struct rb_root *root,
					    struct root_entry *re)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct root_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct root_entry, node);
		if (entry->root_objectid > re->root_objectid)
			p = &(*p)->rb_left;
		else if (entry->root_objectid < re->root_objectid)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&re->node, parent_node, p);
	rb_insert_color(&re->node, root);
	return NULL;
}

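/*
 * Order two ref entries by root objectid, then parent, then owner, then
 * offset.  Returns -1, 0 or 1 so it can be used directly by the rbtree
 * insert loop below.
 */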
static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
	if (ref1->root_objectid < ref2->root_objectid)
		return -1;
	if (ref1->root_objectid > ref2->root_objectid)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	if (ref1->owner < ref2->owner)
		return -1;
	if (ref1->owner > ref2->owner)
		return 1;
	if (ref1->offset < ref2->offset)
		return -1;
	if (ref1->offset > ref2->offset)
		return 1;
	return 0;
}

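/*
 * Insert @ref into a block entry's tree of refs, ordered by comp_refs().
 * Returns the existing ref_entry if an identical ref is already present, or
 * NULL once @ref has been inserted.
 */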
static struct ref_entry *insert_ref_entry(struct rb_root *root,
					  struct ref_entry *ref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct ref_entry *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct ref_entry, node);
		cmp = comp_refs(entry, ref);
		if (cmp > 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&ref->node, parent_node, p);
	rb_insert_color(&ref->node, root);
	return NULL;
}

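/*
 * Find the root_entry for @objectid in a block entry's tree of roots, or NULL
 * if that root holds no direct ref on the block.
 */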
static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
	struct rb_node *n;
	struct root_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct root_entry, node);
		if (entry->root_objectid < objectid)
			n = n->rb_right;
		else if (entry->root_objectid > objectid)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
	ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}

static void __print_stack_trace(struct btrfs_fs_info *fs_info,
				struct ref_action *ra)
{
	if (ra->trace_len == 0) {
		btrfs_err(fs_info, " ref-verify: no stacktrace");
		return;
	}
	stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
				       struct ref_action *ra)
{
	btrfs_err(fs_info, " ref-verify: no stacktrace support");
}
#endif

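/*
 * Release a block_entry along with every root entry, ref entry and recorded
 * ref action hanging off of it.
 */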
static void free_block_entry(struct block_entry *be)
{
	struct root_entry *re;
	struct ref_entry *ref;
	struct ref_action *ra;
	struct rb_node *n;

	while ((n = rb_first(&be->roots))) {
		re = rb_entry(n, struct root_entry, node);
		rb_erase(&re->node, &be->roots);
		kfree(re);
	}

	while ((n = rb_first(&be->refs))) {
		ref = rb_entry(n, struct ref_entry, node);
		rb_erase(&ref->node, &be->refs);
		kfree(ref);
	}

	while (!list_empty(&be->actions)) {
		ra = list_first_entry(&be->actions, struct ref_action,
				      list);
		list_del(&ra->list);
		kfree(ra);
	}
	kfree(be);
}

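/*
 * Find or create the block_entry for @bytenr, adding a root_entry for
 * @root_objectid if one was given.  On success this returns with
 * fs_info->ref_verify_lock held; the caller is responsible for dropping it.
 */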
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
					   u64 bytenr, u64 len,
					   u64 root_objectid)
{
	struct block_entry *be = NULL, *exist;
	struct root_entry *re = NULL;

	re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
	be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
	if (!be || !re) {
		kfree(re);
		kfree(be);
		return ERR_PTR(-ENOMEM);
	}
	be->bytenr = bytenr;
	be->len = len;

	re->root_objectid = root_objectid;
	re->num_refs = 0;

	spin_lock(&fs_info->ref_verify_lock);
	exist = insert_block_entry(&fs_info->block_tree, be);
	if (exist) {
		if (root_objectid) {
			struct root_entry *exist_re;

			exist_re = insert_root_entry(&exist->roots, re);
			if (exist_re)
				kfree(re);
		}
		kfree(be);
		return exist;
	}

	be->num_refs = 0;
	be->metadata = 0;
	be->from_disk = 0;
	be->roots = RB_ROOT;
	be->refs = RB_ROOT;
	INIT_LIST_HEAD(&be->actions);
	if (root_objectid)
		insert_root_entry(&be->roots, re);
	else
		kfree(re);
	return be;
}

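/*
 * Record a tree block ref found in the extent tree at mount time.  A shared
 * ref (@parent set) is stored with root_objectid 0, otherwise the ref is
 * attributed directly to @ref_root.  Drops the ref_verify_lock taken by
 * add_block_entry() before returning.
 */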
static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
			  u64 parent, u64 bytenr, int level)
{
	struct block_entry *be;
	struct root_entry *re;
	struct ref_entry *ref = NULL, *exist;

	ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref->root_objectid = 0;
	else
		ref->root_objectid = ref_root;
	ref->parent = parent;
	ref->owner = level;
	ref->offset = 0;
	ref->num_refs = 1;

	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs++;
	be->from_disk = 1;
	be->metadata = 1;

	if (!parent) {
		ASSERT(ref_root);
		re = lookup_root_entry(&be->roots, ref_root);
		ASSERT(re);
		re->num_refs++;
	}
	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		exist->num_refs++;
		kfree(ref);
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}

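/*
 * Record a shared data ref (keyed by @parent) found in the extent tree while
 * building the cache at mount time.
 */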
static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
			       u64 parent, u32 num_refs, u64 bytenr,
			       u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = parent;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing shared ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

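/*
 * Record a normal (non-shared) data ref found in the extent tree while
 * building the cache, crediting the ref to the root stored in the
 * btrfs_extent_data_ref item.
 */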
static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *leaf,
			       struct btrfs_extent_data_ref *dref,
			       u64 bytenr, u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;
	struct root_entry *re;
	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = 0;
	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}

	re = lookup_root_entry(&be->roots, ref_root);
	if (!re) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "missing root in new block entry?");
		return -EINVAL;
	}
	re->num_refs += num_refs;
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

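/*
 * Walk every inline ref embedded in an EXTENT_ITEM or METADATA_ITEM and add
 * the corresponding tree block / data refs to the cache.
 */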
static int process_extent_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, struct btrfs_key *key,
			       int slot, int *tree_block_level)
{
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	unsigned long end, ptr;
	u64 offset, flags, count;
	int type, ret = 0;

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*tree_block_level = btrfs_tree_block_level(leaf, info);
		iref = (struct btrfs_extent_inline_ref *)(info + 1);
	} else {
		if (key->type == BTRFS_METADATA_ITEM_KEY)
			*tree_block_level = key->offset;
		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	}

	ptr = (unsigned long)iref;
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);
		switch (type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, offset, 0, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, offset, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  key->objectid, key->offset);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, offset, count,
						  key->objectid, key->offset);
			break;
		default:
			btrfs_err(fs_info, "invalid key type in iref");
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
		ptr += btrfs_extent_inline_ref_size(type);
	}
	return ret;
}

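/*
 * Scan one leaf of the extent tree and record every extent item, keyed ref
 * item and data ref item it contains.  *bytenr and *num_bytes carry the last
 * extent seen across leaves, since keyed refs for an extent may land on a
 * later leaf than the extent item itself.
 */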
static int process_leaf(struct btrfs_root *root,
			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	u32 count;
	int i = 0, tree_block_level = 0, ret = 0;
	struct btrfs_key key;
	int nritems = btrfs_header_nritems(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_EXTENT_ITEM_KEY:
			*num_bytes = key.offset;
			/* fall through */
		case BTRFS_METADATA_ITEM_KEY:
			*bytenr = key.objectid;
			ret = process_extent_item(fs_info, path, &key, i,
						  &tree_block_level);
			break;
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, key.offset, 0,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, key.offset,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = btrfs_item_ptr(leaf, i,
					      struct btrfs_extent_data_ref);
			ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
						  *num_bytes);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = btrfs_item_ptr(leaf, i,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, key.offset, count,
						  *bytenr, *num_bytes);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}

/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
			  int level, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *eb;
	u64 block_bytenr, gen;
	int ret = 0;

	while (level >= 0) {
		if (level) {
			struct btrfs_key first_key;

			block_bytenr = btrfs_node_blockptr(path->nodes[level],
							   path->slots[level]);
			gen = btrfs_node_ptr_generation(path->nodes[level],
							path->slots[level]);
			btrfs_node_key_to_cpu(path->nodes[level], &first_key,
					      path->slots[level]);
			eb = read_tree_block(fs_info, block_bytenr, gen,
					     level - 1, &first_key);
			if (IS_ERR(eb))
				return PTR_ERR(eb);
			if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				return -EIO;
			}
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_read(eb);
			path->nodes[level-1] = eb;
			path->slots[level-1] = 0;
			path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
		} else {
			ret = process_leaf(root, path, bytenr, num_bytes);
			if (ret)
				break;
		}
		level--;
	}
	return ret;
}

/* Walk up to the next node that needs to be processed */
static int walk_up_tree(struct btrfs_path *path, int *level)
{
	int l;

	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
		if (!path->nodes[l])
			continue;
		if (l) {
			path->slots[l]++;
			if (path->slots[l] <
			    btrfs_header_nritems(path->nodes[l])) {
				*level = l;
				return 0;
			}
		}
		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
		free_extent_buffer(path->nodes[l]);
		path->nodes[l] = NULL;
		path->slots[l] = 0;
		path->locks[l] = 0;
	}

	return 1;
}

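/*
 * Print a single recorded ref action, including the stack trace captured when
 * the ref was modified.
 */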
static void dump_ref_action(struct btrfs_fs_info *fs_info,
			    struct ref_action *ra)
{
	btrfs_err(fs_info,
		  " Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
		  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
		  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
	__print_stack_trace(fs_info, ra);
}

/*
 * Dumps all the information from the block entry to printk, it's going to be
 * awesome.
 */
static void dump_block_entry(struct btrfs_fs_info *fs_info,
			     struct block_entry *be)
{
	struct ref_entry *ref;
	struct root_entry *re;
	struct ref_action *ra;
	struct rb_node *n;

	btrfs_err(fs_info,
		  "dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
		  be->bytenr, be->len, be->num_refs, be->metadata,
		  be->from_disk);

	for (n = rb_first(&be->refs); n; n = rb_next(n)) {
		ref = rb_entry(n, struct ref_entry, node);
		btrfs_err(fs_info,
			  " ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
			  ref->root_objectid, ref->parent, ref->owner,
			  ref->offset, ref->num_refs);
	}

	for (n = rb_first(&be->roots); n; n = rb_next(n)) {
		re = rb_entry(n, struct root_entry, node);
		btrfs_err(fs_info, " root entry %llu, num_refs %llu",
			  re->root_objectid, re->num_refs);
	}

	list_for_each_entry(ra, &be->actions, list)
		dump_ref_action(fs_info, ra);
}

/*
 * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
 *
 * This will add an action item to the given bytenr and do sanity checks to
 * make sure we haven't messed something up.  If we are making a new allocation
 * and this block entry has history we will delete all previous actions as long
 * as our sanity checks pass as they are no longer needed.
 */
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
		       struct btrfs_ref *generic_ref)
{
	struct ref_entry *ref = NULL, *exist;
	struct ref_action *ra = NULL;
	struct block_entry *be = NULL;
	struct root_entry *re = NULL;
	int action = generic_ref->action;
	int ret = 0;
	bool metadata;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root;
	u64 owner;
	u64 offset;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	if (generic_ref->type == BTRFS_REF_METADATA) {
		ref_root = generic_ref->tree_ref.root;
		owner = generic_ref->tree_ref.level;
		offset = 0;
	} else {
		ref_root = generic_ref->data_ref.ref_root;
		owner = generic_ref->data_ref.ino;
		offset = generic_ref->data_ref.offset;
	}
	metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
	if (!ra || !ref) {
		kfree(ref);
		kfree(ra);
		ret = -ENOMEM;
		goto out;
	}

	if (parent) {
		ref->parent = parent;
	} else {
		ref->root_objectid = ref_root;
		ref->owner = owner;
		ref->offset = offset;
	}
	ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;

	memcpy(&ra->ref, ref, sizeof(struct ref_entry));
	/*
	 * Save the extra info from the delayed ref in the ref action to make
	 * it easier to figure out what is happening.  The real refs we add to
	 * the ref tree need to reflect what we save on disk so it matches any
	 * on-disk refs we pre-loaded.
	 */
	ra->ref.owner = owner;
	ra->ref.offset = offset;
	ra->ref.root_objectid = ref_root;
	__save_stack_trace(ra);

	INIT_LIST_HEAD(&ra->list);
	ra->action = action;
	ra->root = generic_ref->real_root;

	/*
	 * This is an allocation, preallocate the block_entry in case we haven't
	 * used it before.
	 */
	ret = -EINVAL;
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		/*
		 * For subvol_create we'll just pass in whatever the parent root
		 * is and the new root objectid, so let's not treat the passed
		 * in root as if it really has a ref for this bytenr.
		 */
		be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
		if (IS_ERR(be)) {
			kfree(ref);
			kfree(ra);
			ret = PTR_ERR(be);
			goto out;
		}
		be->num_refs++;
		if (metadata)
			be->metadata = 1;

		if (be->num_refs != 1) {
			btrfs_err(fs_info,
			"re-allocated a block that still has references to it!");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		while (!list_empty(&be->actions)) {
			struct ref_action *tmp;

			tmp = list_first_entry(&be->actions, struct ref_action,
					       list);
			list_del(&tmp->list);
			kfree(tmp);
		}
	} else {
		struct root_entry *tmp;

		if (!parent) {
			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
			if (!re) {
				kfree(ref);
				kfree(ra);
				ret = -ENOMEM;
				goto out;
			}
			/*
			 * This is the root that is modifying us, so it's the
			 * one we want to lookup below when we modify the
			 * re->num_refs.
			 */
			ref_root = generic_ref->real_root;
			re->root_objectid = generic_ref->real_root;
			re->num_refs = 0;
		}

		spin_lock(&fs_info->ref_verify_lock);
		be = lookup_block_entry(&fs_info->block_tree, bytenr);
		if (!be) {
			btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
				  action, (unsigned long long)bytenr,
				  (unsigned long long)num_bytes);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		} else if (be->num_refs == 0) {
			btrfs_err(fs_info,
		"trying to do action %d for a bytenr that has 0 total references",
				  action);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		if (!parent) {
			tmp = insert_root_entry(&be->roots, re);
			if (tmp) {
				kfree(re);
				re = tmp;
			}
		}
	}

	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		if (action == BTRFS_DROP_DELAYED_REF) {
			if (exist->num_refs == 0) {
				btrfs_err(fs_info,
"dropping a ref for an existing root that doesn't have a ref on the block");
				dump_block_entry(fs_info, be);
				dump_ref_action(fs_info, ra);
				kfree(ref);
				kfree(ra);
				goto out_unlock;
			}
			exist->num_refs--;
			if (exist->num_refs == 0) {
				rb_erase(&exist->node, &be->refs);
				kfree(exist);
			}
		} else if (!be->metadata) {
			exist->num_refs++;
		} else {
			btrfs_err(fs_info,
"attempting to add another ref for an existing ref on a tree block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}
		kfree(ref);
	} else {
		if (action == BTRFS_DROP_DELAYED_REF) {
			btrfs_err(fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}

	if (!parent && !re) {
		re = lookup_root_entry(&be->roots, ref_root);
		if (!re) {
			/*
			 * This shouldn't happen because we will add our re
			 * above when we lookup the be with !parent, but just in
			 * case catch this case so we don't panic because I
			 * didn't think of some other corner case.
			 */
			btrfs_err(fs_info, "failed to find root %llu for %llu",
				  generic_ref->real_root, be->bytenr);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}
	if (action == BTRFS_DROP_DELAYED_REF) {
		if (re)
			re->num_refs--;
		be->num_refs--;
	} else if (action == BTRFS_ADD_DELAYED_REF) {
		be->num_refs++;
		if (re)
			re->num_refs++;
	}
	list_add_tail(&ra->list, &be->actions);
	ret = 0;
out_unlock:
	spin_unlock(&fs_info->ref_verify_lock);
out:
	if (ret)
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
	return ret;
}

/* Free up the ref cache */
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
{
	struct block_entry *be;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	while ((n = rb_first(&fs_info->block_tree))) {
		be = rb_entry(n, struct block_entry, node);
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
		cond_resched_lock(&fs_info->ref_verify_lock);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

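/*
 * Drop every cached block entry inside the range [start, start + len),
 * typically when the block group backing the range goes away.  Entries that
 * cross the range boundaries are reported since that should never happen.
 */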
void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
			       u64 len)
{
	struct block_entry *be = NULL, *entry;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	n = fs_info->block_tree.rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < start) {
			n = n->rb_right;
		} else if (entry->bytenr > start) {
			n = n->rb_left;
		} else {
			be = entry;
			break;
		}
		/* We want to get as close to start as possible */
		if (be == NULL ||
		    (entry->bytenr < start && be->bytenr > start) ||
		    (entry->bytenr < start && entry->bytenr > be->bytenr))
			be = entry;
	}

	/*
	 * Could have an empty block group, maybe have something to check for
	 * this case to verify we were actually empty?
	 */
	if (!be) {
		spin_unlock(&fs_info->ref_verify_lock);
		return;
	}

	n = &be->node;
	while (n) {
		be = rb_entry(n, struct block_entry, node);
		n = rb_next(n);
		if (be->bytenr < start && be->bytenr + be->len > start) {
			btrfs_err(fs_info,
				  "block entry overlaps a block group [%llu,%llu]!",
				  start, len);
			dump_block_entry(fs_info, be);
			continue;
		}
		if (be->bytenr < start)
			continue;
		if (be->bytenr >= start + len)
			break;
		if (be->bytenr + be->len > start + len) {
			btrfs_err(fs_info,
				  "block entry overlaps a block group [%llu,%llu]!",
				  start, len);
			dump_block_entry(fs_info, be);
		}
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct extent_buffer *eb;
	u64 bytenr = 0, num_bytes = 0;
	int ret, level;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	eb = btrfs_read_lock_root_node(fs_info->extent_root);
	btrfs_set_lock_blocking_read(eb);
	level = btrfs_header_level(eb);
	path->nodes[level] = eb;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

	while (1) {
		/*
		 * We have to keep track of the bytenr/num_bytes we last hit
		 * because we could have run out of space for an inline ref,
		 * and would have had to add a ref key item which may appear
		 * on a different leaf from the original extent item.
		 */
		ret = walk_down_tree(fs_info->extent_root, path, level,
				     &bytenr, &num_bytes);
		if (ret)
			break;
		ret = walk_up_tree(path, &level);
		if (ret < 0)
			break;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}
	if (ret) {
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
		btrfs_free_ref_cache(fs_info);
	}
	btrfs_free_path(path);
	return ret;
}