// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "discard.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "uuid-tree.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

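/* Free the checksum hash transform attached to the fs_info, if any. */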
static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
	if (fs_info->csum_shash)
		crypto_free_shash(fs_info->csum_shash);
}

/*
 * Compute the csum of a btree block and store the result in the provided
 * buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	int num_pages;
	u32 first_page_part;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	int i;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);

	if (buf->addr) {
		/* Pages are contiguous, handle them as one big chunk. */
		kaddr = buf->addr;
		first_page_part = fs_info->nodesize;
		num_pages = 1;
	} else {
		kaddr = folio_address(buf->folios[0]);
		first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
		num_pages = num_extent_pages(buf);
	}

	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    first_page_part - BTRFS_CSUM_SIZE);

	/*
	 * Only the multiple single-page folios case reaches here.
	 *
	 * The nodesize <= PAGE_SIZE and large folio cases were already fully
	 * handled by the crypto_shash_update() above.
	 */
	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
		kaddr = folio_address(buf->folios[i]);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
	}
	memset(result, 0, BTRFS_CSUM_SIZE);
	crypto_shash_final(shash, result);
}

/*
 * We can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
{
	if (!extent_buffer_uptodate(eb))
		return 0;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 1;

	if (atomic)
		return -EAGAIN;

	if (!extent_buffer_uptodate(eb) ||
	    btrfs_header_generation(eb) != parent_transid) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
			eb->start, eb->read_mirror,
			parent_transid, btrfs_header_generation(eb));
		clear_extent_buffer_uptodate(eb);
		return 0;
	}
	return 1;
}

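/* Return true if @csum_type is a checksum algorithm this kernel supports. */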
static bool btrfs_supported_super_csum(u16 csum_type)
{
	switch (csum_type) {
	case BTRFS_CSUM_TYPE_CRC32:
	case BTRFS_CSUM_TYPE_XXHASH:
	case BTRFS_CSUM_TYPE_SHA256:
	case BTRFS_CSUM_TYPE_BLAKE2:
		return true;
	default:
		return false;
	}
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
			   const struct btrfs_super_block *disk_sb)
{
	char result[BTRFS_CSUM_SIZE];
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;

	/*
	 * The super_block structure does not span the whole
	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
	 * filled with zeros and is included in the checksum.
	 */
	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
		return 1;

	return 0;
}

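/*
 * Write the (presumed good) in-memory contents of @eb back to disk, targeting
 * the copy at @mirror_num, to repair a mirror that failed verification.
 * Not possible on a read-only filesystem.
 */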
static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
				      int mirror_num)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int num_folios = num_extent_folios(eb);
	int ret = 0;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	for (int i = 0; i < num_folios; i++) {
		struct folio *folio = eb->folios[i];
		u64 start = max_t(u64, eb->start, folio_pos(folio));
		u64 end = min_t(u64, eb->start + eb->len,
				folio_pos(folio) + eb->folio_size);
		u32 len = end - start;

		ret = btrfs_repair_io_failure(fs_info, 0, start, len,
					      start, folio, offset_in_folio(folio, start),
					      mirror_num);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @check:	expected tree parentness check, see the comments of the
 *		structure for details.
 */
int btrfs_read_extent_buffer(struct extent_buffer *eb,
			     struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	ASSERT(check);

	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
		if (!ret)
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}

/*
 * Checksum a dirty tree block before IO.
 */
blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
{
	struct extent_buffer *eb = bbio->private;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start = btrfs_header_bytenr(eb);
	u64 last_trans;
	u8 result[BTRFS_CSUM_SIZE];
	int ret;

	/* Btree blocks are always contiguous on disk. */
	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
		return BLK_STS_IOERR;

	/*
	 * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't
	 * checksum it but zero-out its content. This is done to preserve
	 * ordering of I/O without unnecessarily writing out data.
	 */
	if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) {
		memzero_extent_buffer(eb, 0, eb->len);
		return BLK_STS_OK;
	}

	if (WARN_ON_ONCE(found_start != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0],
					       eb->start, eb->len)))
		return BLK_STS_IOERR;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
				    offsetof(struct btrfs_header, fsid),
				    BTRFS_FSID_SIZE) == 0);
	csum_tree_block(eb, result);

	if (btrfs_header_level(eb))
		ret = btrfs_check_node(eb);
	else
		ret = btrfs_check_leaf(eb);

	if (ret < 0)
		goto error;

	/*
	 * Also check the generation: any eb that reaches here must be newer
	 * than the last committed transaction, otherwise something is
	 * seriously wrong.
	 */
	last_trans = btrfs_get_last_trans_committed(fs_info);
	if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
			"block=%llu bad generation, have %llu expect > %llu",
			  eb->start, btrfs_header_generation(eb), last_trans);
		goto error;
	}
	write_extent_buffer(eb, result, 0, fs_info->csum_size);
	return BLK_STS_OK;

error:
	btrfs_print_tree(eb, 0);
	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
		  eb->start);
	/*
	 * Be noisy if this is an extent buffer from a log tree.  We don't
	 * abort a transaction in case there's a bad log tree extent buffer,
	 * we just fall back to a transaction commit.  Still we want to know
	 * when there is a bad log tree extent buffer, as that may signal a
	 * bug somewhere.
	 */
	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
	return errno_to_blk_status(ret);
}

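/*
 * Return true (i.e. a mismatch) if the fsid recorded in @eb's header matches
 * neither this filesystem's metadata_uuid nor any of its seed devices.
 */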
static bool check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	u8 fsid[BTRFS_FSID_SIZE];

	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE);

	/*
	 * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid.
	 * This is then overwritten by metadata_uuid if it is present in
	 * device_list_add().  The same is true for a seed device.  So use of
	 * fs_devices::metadata_uuid is appropriate here.
	 */
	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
		return false;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
			return false;

	return true;
}

/* Do basic extent buffer checks at read time */
int btrfs_validate_extent_buffer(struct extent_buffer *eb,
				 struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start;
	const u32 csum_size = fs_info->csum_size;
	u8 found_level;
	u8 result[BTRFS_CSUM_SIZE];
	const u8 *header_csum;
	int ret = 0;

	ASSERT(check);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info,
			"bad tree block start, mirror %u want %llu have %llu",
			     eb->read_mirror, eb->start, found_start);
		ret = -EIO;
		goto out;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
			     eb->start, eb->read_mirror);
		ret = -EIO;
		goto out;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info,
			"bad tree block level, mirror %u level %d on logical %llu",
			  eb->read_mirror, btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto out;
	}

	csum_tree_block(eb, result);
	header_csum = folio_address(eb->folios[0]) +
		get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum));

	if (memcmp(result, header_csum, csum_size) != 0) {
		btrfs_warn_rl(fs_info,
"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
			      eb->start, eb->read_mirror,
			      CSUM_FMT_VALUE(csum_size, header_csum),
			      CSUM_FMT_VALUE(csum_size, result),
			      btrfs_header_level(eb));
		ret = -EUCLEAN;
		goto out;
	}

	if (found_level != check->level) {
		btrfs_err(fs_info,
		"level verify failed on logical %llu mirror %u wanted %u found %u",
			  eb->start, eb->read_mirror, check->level, found_level);
		ret = -EIO;
		goto out;
	}
	if (unlikely(check->transid &&
		     btrfs_header_generation(eb) != check->transid)) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
				eb->start, eb->read_mirror, check->transid,
				btrfs_header_generation(eb));
		ret = -EIO;
		goto out;
	}
	if (check->has_first_key) {
		struct btrfs_key *expect_key = &check->first_key;
		struct btrfs_key found_key;

		if (found_level)
			btrfs_node_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
			btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
				  eb->start, check->transid,
				  expect_key->objectid,
				  expect_key->type, expect_key->offset,
				  found_key.objectid, found_key.type,
				  found_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
	if (check->owner_root) {
		ret = btrfs_check_eb_owner(eb, check->owner_root);
		if (ret < 0)
			goto out;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try to read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (ret)
		btrfs_err(fs_info,
		"read time tree block corruption detected on logical %llu mirror %u",
			  eb->start, eb->read_mirror);
out:
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/*
	 * We can't safely write a btree page from here; we haven't done the
	 * locking hook.
	 */
	if (folio_test_dirty(src))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_get_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return -EAGAIN;
	return migrate_folio(mapping, dst, src, mode);
}
#else
#define btree_migrate_folio NULL
#endif

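/*
 * Write back dirty btree pages.  For background (WB_SYNC_NONE) writeback,
 * skip the work entirely unless enough dirty metadata has accumulated to be
 * worth the I/O.
 */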
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		struct btrfs_fs_info *fs_info;

		if (wbc->for_kupdate)
			return 0;

		fs_info = inode_to_fs_info(mapping->host);
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					       BTRFS_DIRTY_METADATA_THRESH,
					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

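/*
 * Try to release the extent buffer attached to @folio.  Folios under
 * writeback or still dirty must not be released.
 */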
static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;

	return try_release_extent_buffer(&folio->page);
}

static void btree_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct extent_io_tree *tree;

	tree = &folio_to_inode(folio)->io_tree;
	extent_invalidate_folio(tree, folio, offset);
	btree_release_folio(folio, GFP_NOFS);
	if (folio_get_private(folio)) {
		btrfs_warn(folio_to_fs_info(folio),
			   "folio private not zero on folio %llu",
			   (unsigned long long)folio_pos(folio));
		folio_detach_private(folio);
	}
}

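/*
 * Debug-only dirtying hook: before marking a btree folio dirty, assert that
 * every extent buffer backed by it is itself dirty, referenced and write
 * locked.  The subpage case walks the per-folio dirty bitmap one tree block
 * at a time.
 */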
#ifdef DEBUG
static bool btree_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
	struct btrfs_subpage_info *spi = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	struct extent_buffer *eb;
	int cur_bit = 0;
	u64 page_start = folio_pos(folio);

	if (fs_info->sectorsize == PAGE_SIZE) {
		eb = folio_get_private(folio);
		BUG_ON(!eb);
		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		BUG_ON(!atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		return filemap_dirty_folio(mapping, folio);
	}

	ASSERT(spi);
	subpage = folio_get_private(folio);

	for (cur_bit = spi->dirty_offset;
	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
	     cur_bit++) {
		unsigned long flags;
		u64 cur;

		spin_lock_irqsave(&subpage->lock, flags);
		if (!test_bit(cur_bit, subpage->bitmaps)) {
			spin_unlock_irqrestore(&subpage->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&subpage->lock, flags);
		cur = page_start + cur_bit * fs_info->sectorsize;

		eb = find_extent_buffer(fs_info, cur);
		ASSERT(eb);
		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		ASSERT(atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		free_extent_buffer(eb);

		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
	}
	return filemap_dirty_folio(mapping, folio);
}
#else
#define btree_dirty_folio filemap_dirty_folio
#endif

static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
	.release_folio	= btree_release_folio,
	.invalidate_folio = btree_invalidate_folio,
	.migrate_folio	= btree_migrate_folio,
	.dirty_folio	= btree_dirty_folio,
};

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr, u64 owner_root,
						int level)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
}

/*
 * Read a tree block at logical address @bytenr and perform various basic but
 * critical verifications on it.
 *
 * @check:	expected tree parentness check, see comments of the
 *		structure for details.
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      struct btrfs_tree_parent_check *check)
{
	struct extent_buffer *buf = NULL;
	int ret;

	ASSERT(check);

	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
					   check->level);
	if (IS_ERR(buf))
		return buf;

	ret = btrfs_read_extent_buffer(buf, check);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	if (btrfs_check_eb_owner(buf, check->owner_root)) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(-EUCLEAN);
	}
	return buf;
}

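/*
 * Initialize all the in-memory fields of a freshly allocated root: keys,
 * lists, locks, wait queues and counters.  Does not read anything from disk.
 */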
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = btrfs_is_testing(fs_info);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	root->fs_info = fs_info;
	root->root_key.objectid = objectid;
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	RB_CLEAR_NODE(&root->rb_node);

	root->last_trans = 0;
	root->free_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	xa_init(&root->delayed_nodes);

	btrfs_init_root_block_rsv(root);

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->qgroup_flush_wait);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	btrfs_set_root_log_transid(root, 0);
	root->log_transid_committed = -1;
	btrfs_set_root_last_log_commit(root, 0);
	root->anon_dev = 0;
	if (!dummy) {
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES);
		extent_io_tree_init(fs_info, &root->log_csum_range,
				    IO_TREE_LOG_CSUM_RANGE);
	}

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
	INIT_LIST_HEAD(&root->leak_list);
	spin_lock(&fs_info->fs_roots_radix_lock);
	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
	spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   u64 objectid, gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		__setup_root(root, fs_info, objectid);
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* Dummy roots hand out tree block addresses with a simple bump allocator. */
	root->alloc_bytenr = 0;

	return root;
}
#endif

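/* rb-tree comparators for the global root tree, ordered by btrfs_root::root_key. */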
static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
{
	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
}

static int global_root_key_cmp(const void *k, const struct rb_node *node)
{
	const struct btrfs_key *key = k;
	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(key, &root->root_key);
}

int btrfs_global_root_insert(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *tmp;
	int ret = 0;

	write_lock(&fs_info->global_root_lock);
	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
	write_unlock(&fs_info->global_root_lock);

	if (tmp) {
		ret = -EEXIST;
		btrfs_warn(fs_info, "global root %llu %llu already exists",
			   btrfs_root_id(root), root->root_key.offset);
	}
	return ret;
}

void btrfs_global_root_delete(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	write_lock(&fs_info->global_root_lock);
	rb_erase(&root->rb_node, &fs_info->global_root_tree);
	write_unlock(&fs_info->global_root_lock);
}

struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *key)
{
	struct rb_node *node;
	struct btrfs_root *root = NULL;

	read_lock(&fs_info->global_root_lock);
	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
	if (node)
		root = container_of(node, struct btrfs_root, rb_node);
	read_unlock(&fs_info->global_root_lock);

	return root;
}

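/*
 * Map a logical address to the id of the global root (csum/extent tree)
 * covering it, via the owning block group.  Without the EXTENT_TREE_V2
 * feature there is only one global root, id 0.
 */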
static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	u64 ret;

	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
		return 0;

	if (bytenr)
		block_group = btrfs_lookup_block_group(fs_info, bytenr);
	else
		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
	ASSERT(block_group);
	if (!block_group)
		return 0;
	ret = block_group->global_root_id;
	btrfs_put_block_group(block_group);

	return ret;
}

struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_CSUM_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
{
	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
		return fs_info->block_group_root;
	return btrfs_extent_root(fs_info, 0);
}

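/*
 * Create a new, empty tree: allocate its root leaf and insert the matching
 * root item into the tree of tree roots.
 */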
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(trans, leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	btrfs_set_root_flags(&root->root_item, 0);
	btrfs_set_root_limit(&root->root_item, 0);
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		generate_random_guid(root->root_item.uuid);
	else
		export_guid(root->root_item.uuid, &guid_null);
	btrfs_set_root_drop_level(&root->root_item, 0);

	btrfs_tree_unlock(leaf);

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	return root;

fail:
	btrfs_put_root(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	return root;
}

int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct extent_buffer *leaf;

	/*
	 * DON'T set SHAREABLE bit for log trees.
	 *
	 * Log trees are not exposed to user space thus can't be snapshotted,
	 * and they go away before a real commit is actually done.
	 *
	 * They do store pointers to file data extents, and those reference
	 * counts still get updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf))
		return PTR_ERR(leaf);

	root->node = leaf;

	btrfs_mark_buffer_dirty(trans, root->node);
	btrfs_tree_unlock(root->node);

	return 0;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	if (!btrfs_is_zoned(fs_info)) {
		int ret = btrfs_alloc_log_tree_node(trans, log_root);

		if (ret) {
			btrfs_put_root(log_root);
			return ret;
		}
	}

	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

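/*
 * Create a log tree for @root and attach it as root->log_root.  The log
 * root's key offset records the id of the subvolume it belongs to.
 */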
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;
	int ret;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	ret = btrfs_alloc_log_tree_node(trans, log_root);
	if (ret) {
		btrfs_put_root(log_root);
		return ret;
	}

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = btrfs_root_id(root);

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	btrfs_set_root_log_transid(root, 0);
	root->log_transid_committed = -1;
	btrfs_set_root_last_log_commit(root, 0);
	return 0;
}

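/*
 * Read a root from the tree of tree roots: find its root item, read the root
 * node it points at, and verify the node's generation and owner.
 */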
static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
					      struct btrfs_path *path,
					      struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_tree_parent_check check = { 0 };
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	u64 generation;
	int ret;
	int level;

	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	check.level = level;
	check.transid = generation;
	check.owner_root = key->objectid;
	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
				     &check);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		root->node = NULL;
		goto fail;
	}
	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto fail;
	}

	/*
	 * For a real fs, and for trees other than log/reloc trees, the root's
	 * owner must match its root node's owner.
	 */
	if (!btrfs_is_testing(fs_info) &&
	    btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
	    btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
	    btrfs_root_id(root) != btrfs_header_owner(root->node)) {
		btrfs_crit(fs_info,
"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
			   btrfs_root_id(root), root->node->start,
			   btrfs_header_owner(root->node),
			   btrfs_root_id(root));
		ret = -EUCLEAN;
		goto fail;
	}
	root->commit_root = btrfs_root_node(root);
	return root;
fail:
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);
	root = read_tree_root_path(tree_root, path, key);
	btrfs_free_path(path);

	return root;
}

/*
 * Initialize subvolume root in-memory structure.
 *
 * @anon_dev:	anonymous device to attach to the root, or zero to allocate a
 *		new one
 */
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
	int ret;

	btrfs_drew_lock_init(&root->snapshot_lock);

	if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
	    !btrfs_is_data_reloc_root(root) &&
	    is_fstree(btrfs_root_id(root))) {
		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	/*
	 * Don't assign an anonymous block device to roots that are not
	 * exposed to userspace, the id pool is limited to 1M.
	 */
	if (is_fstree(btrfs_root_id(root)) &&
	    btrfs_root_refs(&root->root_item) > 0) {
		if (!anon_dev) {
			ret = get_anon_bdev(&root->anon_dev);
			if (ret)
				goto fail;
		} else {
			root->anon_dev = anon_dev;
		}
	}

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_init_root_free_objectid(root);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible for calling btrfs_free_fs_root(). */
	return ret;
}

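/* Look up a cached fs root in the radix tree, taking a reference if found. */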
static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	root = btrfs_grab_root(root);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
						u64 objectid)
{
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};

	switch (objectid) {
	case BTRFS_ROOT_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->tree_root);
	case BTRFS_EXTENT_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_CHUNK_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->chunk_root);
	case BTRFS_DEV_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->dev_root);
	case BTRFS_CSUM_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_QUOTA_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->quota_root);
	case BTRFS_UUID_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->uuid_root);
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->block_group_root);
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->stripe_root);
	default:
		return NULL;
	}
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)btrfs_root_id(root),
				root);
	if (ret == 0) {
		btrfs_grab_root(root);
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
	struct btrfs_root *root;

	while (!list_empty(&fs_info->allocated_roots)) {
		char buf[BTRFS_ROOT_NAME_BUF_LEN];

		root = list_first_entry(&fs_info->allocated_roots,
					struct btrfs_root, leak_list);
		btrfs_err(fs_info, "leaked root %s refcount %d",
			  btrfs_root_name(&root->root_key, buf),
			  refcount_read(&root->refs));
		WARN_ON_ONCE(1);
		while (refcount_read(&root->refs) > 1)
			btrfs_put_root(root);
		btrfs_put_root(root);
	}
#endif
}

static void free_global_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct rb_node *node;

	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
		root = rb_entry(node, struct btrfs_root, rb_node);
		rb_erase(&root->rb_node, &fs_info->global_root_tree);
		btrfs_put_root(root);
	}
}

void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
	struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->ordered_bytes);
	if (percpu_counter_initialized(em_counter))
		ASSERT(percpu_counter_sum_positive(em_counter) == 0);
	percpu_counter_destroy(em_counter);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	btrfs_free_csum_hash(fs_info);
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	free_global_roots(fs_info);
	btrfs_put_root(fs_info->tree_root);
	btrfs_put_root(fs_info->chunk_root);
	btrfs_put_root(fs_info->dev_root);
	btrfs_put_root(fs_info->quota_root);
	btrfs_put_root(fs_info->uuid_root);
	btrfs_put_root(fs_info->fs_root);
	btrfs_put_root(fs_info->data_reloc_root);
	btrfs_put_root(fs_info->block_group_root);
	btrfs_put_root(fs_info->stripe_root);
	btrfs_check_leaked_roots(fs_info);
	btrfs_extent_buffer_leak_debug_check(fs_info);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	kfree(fs_info->subpage_info);
	kvfree(fs_info);
}

/*
 * Get an in-memory reference of a root structure.
 *
 * For essential trees like root/extent tree, we grab it from fs_info directly.
 * For subvolume trees, we check the cached filesystem roots first. If not
 * found, then read it from disk and add it to cached fs roots.
 *
 * Caller should release the root by calling btrfs_put_root() after use.
 *
 * NOTE: Reloc and log trees can't be read by this function as they share the
 *	 same root objectid.
 *
 * @objectid:	root id
 * @anon_dev:	preallocated anonymous block device number for new roots,
 *		pass NULL for a new allocation.
 * @check_ref:	whether to check root item references, if true, return -ENOENT
 *		for orphan roots
 */
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
					     u64 objectid, dev_t *anon_dev,
					     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	/*
	 * If we're called for non-subvolume trees, and above function didn't
	 * find one, do not try to read it from disk.
	 *
	 * This is namely for free-space-tree and quota tree, which can change
	 * at runtime and should only be grabbed from fs_info.
	 */
	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
		return ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root) {
		/*
		 * Some other caller may have read out the newly inserted
		 * subvolume already (for things like backref walk etc).  Not
		 * that common but still possible.  In that case, we just need
		 * to free the anon_dev.
		 */
		if (unlikely(anon_dev && *anon_dev)) {
			free_anon_bdev(*anon_dev);
			*anon_dev = 0;
		}

		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
			btrfs_put_root(root);
			return ERR_PTR(-ENOENT);
		}
		return root;
	}

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_tree_root(fs_info->tree_root, &key);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_put_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	/*
	 * If our caller provided us an anonymous device, then it's the
	 * caller's responsibility to free it in case we fail.  So we have to
	 * set our root's anon_dev to 0 to avoid a double free, once by
	 * btrfs_put_root() and once again by our caller.
	 */
	if (anon_dev && *anon_dev)
		root->anon_dev = 0;
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

/*
 * Get an in-memory reference of a root structure.
 *
 * @objectid:	tree objectid
 * @check_ref:	if set, verify that the tree exists and the item has at least
 *		one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     u64 objectid, bool check_ref)
{
	return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
}

/*
 * Get an in-memory reference of a root structure, created as new, optionally
 * passing the anonymous block device id.
 *
 * @objectid:	tree objectid
 * @anon_dev:	if NULL, allocate a new anonymous block device, otherwise use
 *		the value it points to
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
					 u64 objectid, dev_t *anon_dev)
{
	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}

/*
 * Return a root for the given objectid.
 *
 * @fs_info:	the fs_info
 * @objectid:	the objectid we need to lookup
 *
 * This is exclusively used for backref walking, and exists specifically
 * because of how the qgroup code does lookups.  Qgroups will do a backref
 * lookup at delayed ref creation time, which means we may have to read the
 * tree_root in order to look up a fs root that is not in memory.  If the root
 * is not in memory we will read the tree root commit root and look up the fs
 * root from there.  This is a temporary root, it will not be inserted into
 * the radix tree as it doesn't have the most up-to-date information, it'll
 * simply be discarded once the backref code is finished using the root.
 */
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 u64 objectid)
{
	struct btrfs_root *root;
	struct btrfs_key key;

	ASSERT(path->search_commit_root && path->skip_locking);

	/*
	 * This can return -ENOENT if we ask for a root that doesn't exist,
	 * but since this is called via the backref walking code we won't be
	 * looking up a root that doesn't exist, unless there's corruption.
	 * So if root != NULL just return it.
	 */
	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root)
		return root;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = read_tree_root_path(fs_info->tree_root, path, &key);
	btrfs_release_path(path);

	return root;
}

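/*
 * Background thread doing the filesystem's periodic cleanup work: delayed
 * iputs, deleted snapshot/subvolume cleanup, inode defrag runs, and deletion
 * or reclaim of unused block groups.
 */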
static int cleaner_kthread(void *arg)
{
	struct btrfs_fs_info *fs_info = arg;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Guard against the fs status having changed between the
		 * check above and the trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
			btrfs_sysfs_feature_update(fs_info);

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(fs_info);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * so we needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->reclaim_bgs_lock. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		/*
		 * Reclaim block groups in the reclaim_bgs list after we deleted
		 * all unused block_groups. This possibly gives us some more free
		 * space.
		 */
		btrfs_reclaim_bgs(fs_info);
sleep:
		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

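/*
 * Background thread that commits the running transaction once it is older
 * than the commit interval (or a commit was explicitly requested), kicking
 * the cleaner afterwards, then goes back to sleep.
 */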
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t delta;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		delta = ktime_get_seconds() - cur->start_time;
		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
		    cur->state < TRANS_STATE_COMMIT_PREP &&
		    delta < fs_info->commit_interval) {
			spin_unlock(&fs_info->trans_lock);
			delay -= msecs_to_jiffies((delta - 1) * 1000);
			delay = min(delay,
				    msecs_to_jiffies(fs_info->commit_interval * 1000));
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (BTRFS_FS_ERROR(fs_info))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
		    (!btrfs_transaction_blocked(fs_info) ||
		     cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * This will find the highest generation in the array of root backups.  The
 * index of the newest backup slot is returned, or -EINVAL if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info)
{
	const u64 newest_gen = btrfs_super_generation(info->super_copy);
	u64 cur;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			return i;
	}

	return -EINVAL;
}

/*
 * Copy all the root pointers into the super backup array.  This will bump
 * the backup pointer by one when it is done.
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	const int next_backup = info->backup_root_index;
	struct btrfs_root_backup *root_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * Make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today.
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			btrfs_header_level(info->chunk_root->node));

	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);

		btrfs_set_backup_extent_root(root_backup,
					     extent_root->node->start);
		btrfs_set_backup_extent_root_gen(root_backup,
				btrfs_header_generation(extent_root->node));
		btrfs_set_backup_extent_root_level(root_backup,
					btrfs_header_level(extent_root->node));

		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
		btrfs_set_backup_csum_root_gen(root_backup,
					       btrfs_header_generation(csum_root->node));
		btrfs_set_backup_csum_root_level(root_backup,
						 btrfs_header_level(csum_root->node));
	}

	/*
	 * We might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
				      btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
					btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * If we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit.
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * Reads a backup root based on the passed priority.  Prio 0 is the newest,
 * prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots.
 *
 * @fs_info:	filesystem whose backup roots need to be read
 * @priority:	priority of backup root required
 *
 * Returns backup root index on success and -EINVAL otherwise.
 */
static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
{
	int backup_index = find_newest_super_backup(fs_info);
	struct btrfs_super_block *super = fs_info->super_copy;
	struct btrfs_root_backup *root_backup;

	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
		if (priority == 0)
			return backup_index;

		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
	} else {
		return -EINVAL;
	}

	root_backup = super->super_roots + backup_index;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * Fixme: the total bytes and num_devices need to match or we need a
	 * fsck.
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));

	return backup_index;
}

/* Helper to clean up the workers. */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	if (fs_info->endio_workers)
		destroy_workqueue(fs_info->endio_workers);
	if (fs_info->rmw_workers)
		destroy_workqueue(fs_info->rmw_workers);
	if (fs_info->compressed_write_workers)
		destroy_workqueue(fs_info->compressed_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	if (fs_info->discard_ctl.discard_workers)
		destroy_workqueue(fs_info->discard_ctl.discard_workers);
	/*
	 * Now that all other work queues are destroyed, we can safely destroy
	 * the queues used for metadata I/O, since tasks from those other work
	 * queues can do metadata I/O operations.
	 */
	if (fs_info->endio_meta_workers)
		destroy_workqueue(fs_info->endio_meta_workers);
}

static void free_root_extent_buffers(struct btrfs_root *root)
{
	if (root) {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		root->node = NULL;
		root->commit_root = NULL;
	}
}

static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root, *tmp;

	rbtree_postorder_for_each_entry_safe(root, tmp,
					     &fs_info->global_root_tree,
					     rb_node)
		free_root_extent_buffers(root);
}

/* Helper to clean up the tree roots. */
static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
{
	free_root_extent_buffers(info->tree_root);

	free_global_root_pointers(info);
	free_root_extent_buffers(info->dev_root);
	free_root_extent_buffers(info->quota_root);
	free_root_extent_buffers(info->uuid_root);
	free_root_extent_buffers(info->fs_root);
	free_root_extent_buffers(info->data_reloc_root);
	free_root_extent_buffers(info->block_group_root);
	free_root_extent_buffers(info->stripe_root);
	if (free_chunk_root)
		free_root_extent_buffers(info->chunk_root);
}

1851void btrfs_put_root(struct btrfs_root *root)
1852{
1853 if (!root)
1854 return;
1855
1856 if (refcount_dec_and_test(&root->refs)) {
1857 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
1858 WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1859 if (root->anon_dev)
1860 free_anon_bdev(root->anon_dev);
1861 free_root_extent_buffers(root);
1862#ifdef CONFIG_BTRFS_DEBUG
1863 spin_lock(&root->fs_info->fs_roots_radix_lock);
1864 list_del_init(&root->leak_list);
1865 spin_unlock(&root->fs_info->fs_roots_radix_lock);
1866#endif
1867 kfree(root);
1868 }
1869}
1870
1871void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1872{
1873 int ret;
1874 struct btrfs_root *gang[8];
1875 int i;
1876
1877 while (!list_empty(&fs_info->dead_roots)) {
1878 gang[0] = list_entry(fs_info->dead_roots.next,
1879 struct btrfs_root, root_list);
1880 list_del(&gang[0]->root_list);
1881
1882 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1883 btrfs_drop_and_free_fs_root(fs_info, gang[0]);
1884 btrfs_put_root(gang[0]);
1885 }
1886
1887 while (1) {
1888 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1889 (void **)gang, 0,
1890 ARRAY_SIZE(gang));
1891 if (!ret)
1892 break;
1893 for (i = 0; i < ret; i++)
1894 btrfs_drop_and_free_fs_root(fs_info, gang[i]);
1895 }
1896}
1897
1898static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1899{
1900 mutex_init(&fs_info->scrub_lock);
1901 atomic_set(&fs_info->scrubs_running, 0);
1902 atomic_set(&fs_info->scrub_pause_req, 0);
1903 atomic_set(&fs_info->scrubs_paused, 0);
1904 atomic_set(&fs_info->scrub_cancel_req, 0);
1905 init_waitqueue_head(&fs_info->scrub_pause_wait);
1906 refcount_set(&fs_info->scrub_workers_refcnt, 0);
1907}
1908
1909static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1910{
1911 spin_lock_init(&fs_info->balance_lock);
1912 mutex_init(&fs_info->balance_mutex);
1913 atomic_set(&fs_info->balance_pause_req, 0);
1914 atomic_set(&fs_info->balance_cancel_req, 0);
1915 fs_info->balance_ctl = NULL;
1916 init_waitqueue_head(&fs_info->balance_wait_q);
1917 atomic_set(&fs_info->reloc_cancel_req, 0);
1918}
1919
1920static int btrfs_init_btree_inode(struct super_block *sb)
1921{
1922 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1923 unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1924 fs_info->tree_root);
1925 struct inode *inode;
1926
1927 inode = new_inode(sb);
1928 if (!inode)
1929 return -ENOMEM;
1930
1931 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1932 set_nlink(inode, 1);
1933 /*
1934 * We set i_size on the btree inode to the maximum possible offset
1935 * (OFFSET_MAX); the real end of the address space is determined by
1936 * all of the devices in the system.
1937 */
1938 inode->i_size = OFFSET_MAX;
1939 inode->i_mapping->a_ops = &btree_aops;
1940 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
1941
1942 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
1943 extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
1944 IO_TREE_BTREE_INODE_IO);
1945 extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
1946
1947 BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
1948 BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
1949 BTRFS_I(inode)->location.type = 0;
1950 BTRFS_I(inode)->location.offset = 0;
1951 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
1952 __insert_inode_hash(inode, hash);
1953 fs_info->btree_inode = inode;
1954
1955 return 0;
1956}
1957
1958static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1959{
1960 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1961 init_rwsem(&fs_info->dev_replace.rwsem);
1962 init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1963}
1964
1965static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1966{
1967 spin_lock_init(&fs_info->qgroup_lock);
1968 mutex_init(&fs_info->qgroup_ioctl_lock);
1969 fs_info->qgroup_tree = RB_ROOT;
1970 INIT_LIST_HEAD(&fs_info->dirty_qgroups);
1971 fs_info->qgroup_seq = 1;
1972 fs_info->qgroup_ulist = NULL;
1973 fs_info->qgroup_rescan_running = false;
1974 fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
1975 mutex_init(&fs_info->qgroup_rescan_lock);
1976}
1977
1978static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1979{
1980 u32 max_active = fs_info->thread_pool_size;
1981 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1982 unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
1983
1984 fs_info->workers =
1985 btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
1986
1987 fs_info->delalloc_workers =
1988 btrfs_alloc_workqueue(fs_info, "delalloc",
1989 flags, max_active, 2);
1990
1991 fs_info->flush_workers =
1992 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
1993 flags, max_active, 0);
1994
1995 fs_info->caching_workers =
1996 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
1997
1998 fs_info->fixup_workers =
1999 btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags);
2000
2001 fs_info->endio_workers =
2002 alloc_workqueue("btrfs-endio", flags, max_active);
2003 fs_info->endio_meta_workers =
2004 alloc_workqueue("btrfs-endio-meta", flags, max_active);
2005 fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
2006 fs_info->endio_write_workers =
2007 btrfs_alloc_workqueue(fs_info, "endio-write", flags,
2008 max_active, 2);
2009 fs_info->compressed_write_workers =
2010 alloc_workqueue("btrfs-compressed-write", flags, max_active);
2011 fs_info->endio_freespace_worker =
2012 btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
2013 max_active, 0);
2014 fs_info->delayed_workers =
2015 btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
2016 max_active, 0);
2017 fs_info->qgroup_rescan_workers =
2018 btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan",
2019 ordered_flags);
2020 fs_info->discard_ctl.discard_workers =
2021 alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);
2022
2023 if (!(fs_info->workers &&
2024 fs_info->delalloc_workers && fs_info->flush_workers &&
2025 fs_info->endio_workers && fs_info->endio_meta_workers &&
2026 fs_info->compressed_write_workers &&
2027 fs_info->endio_write_workers &&
2028 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2029 fs_info->caching_workers && fs_info->fixup_workers &&
2030 fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
2031 fs_info->discard_ctl.discard_workers)) {
2032 return -ENOMEM;
2033 }
2034
2035 return 0;
2036}
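
/*
 * Illustrative note (not in the original source): the trailing numeric
 * argument to btrfs_alloc_workqueue() above is assumed to be a threshold
 * used to decide when to grow the number of active workers with the
 * amount of queued work (see fs/btrfs/async-thread.c); 0 selects a
 * built-in default and sufficiently small values effectively disable the
 * scaling. A minimal sketch of the allocation pattern, with the NULL
 * check done collectively above:
 *
 *   wq = btrfs_alloc_workqueue(fs_info, "example", flags, max_active, 0);
 *   if (!wq)
 *           return -ENOMEM;
 */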
2037
2038static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2039{
2040 struct crypto_shash *csum_shash;
2041 const char *csum_driver = btrfs_super_csum_driver(csum_type);
2042
2043 csum_shash = crypto_alloc_shash(csum_driver, 0, 0);
2044
2045 if (IS_ERR(csum_shash)) {
2046 btrfs_err(fs_info, "error allocating %s hash for checksum",
2047 csum_driver);
2048 return PTR_ERR(csum_shash);
2049 }
2050
2051 fs_info->csum_shash = csum_shash;
2052
2053 /*
2054 * Check if the checksum implementation is a fast accelerated one.
2055 * As-is this is a bit of a hack and should be replaced once the csum
2056 * implementations provide that information themselves.
2057 */
2058 switch (csum_type) {
2059 case BTRFS_CSUM_TYPE_CRC32:
2060 if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
2061 set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2062 break;
2063 case BTRFS_CSUM_TYPE_XXHASH:
2064 set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
2065 break;
2066 default:
2067 break;
2068 }
2069
2070 btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2071 btrfs_super_csum_name(csum_type),
2072 crypto_shash_driver_name(csum_shash));
2073 return 0;
2074}
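
/*
 * Illustrative note (not in the original source): the "fast" detection
 * above keys off the crypto driver name. For crc32c the generic software
 * implementation typically registers as "crc32c-generic", while an
 * accelerated one may register as e.g. "crc32c-intel", so the
 * !strstr(..., "generic") test treats any non-generic driver as
 * accelerated. The exact driver names are platform dependent.
 */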
2075
2076static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2077 struct btrfs_fs_devices *fs_devices)
2078{
2079 int ret;
2080 struct btrfs_tree_parent_check check = { 0 };
2081 struct btrfs_root *log_tree_root;
2082 struct btrfs_super_block *disk_super = fs_info->super_copy;
2083 u64 bytenr = btrfs_super_log_root(disk_super);
2084 int level = btrfs_super_log_root_level(disk_super);
2085
2086 if (fs_devices->rw_devices == 0) {
2087 btrfs_warn(fs_info, "log replay required on RO media");
2088 return -EIO;
2089 }
2090
2091 log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2092 GFP_KERNEL);
2093 if (!log_tree_root)
2094 return -ENOMEM;
2095
2096 check.level = level;
2097 check.transid = fs_info->generation + 1;
2098 check.owner_root = BTRFS_TREE_LOG_OBJECTID;
2099 log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
2100 if (IS_ERR(log_tree_root->node)) {
2101 btrfs_warn(fs_info, "failed to read log tree");
2102 ret = PTR_ERR(log_tree_root->node);
2103 log_tree_root->node = NULL;
2104 btrfs_put_root(log_tree_root);
2105 return ret;
2106 }
2107 if (!extent_buffer_uptodate(log_tree_root->node)) {
2108 btrfs_err(fs_info, "failed to read log tree");
2109 btrfs_put_root(log_tree_root);
2110 return -EIO;
2111 }
2112
2113 /* returns with log_tree_root freed on success */
2114 ret = btrfs_recover_log_trees(log_tree_root);
2115 if (ret) {
2116 btrfs_handle_fs_error(fs_info, ret,
2117 "Failed to recover log tree");
2118 btrfs_put_root(log_tree_root);
2119 return ret;
2120 }
2121
2122 if (sb_rdonly(fs_info->sb)) {
2123 ret = btrfs_commit_super(fs_info);
2124 if (ret)
2125 return ret;
2126 }
2127
2128 return 0;
2129}
2130
2131static int load_global_roots_objectid(struct btrfs_root *tree_root,
2132 struct btrfs_path *path, u64 objectid,
2133 const char *name)
2134{
2135 struct btrfs_fs_info *fs_info = tree_root->fs_info;
2136 struct btrfs_root *root;
2137 u64 max_global_id = 0;
2138 int ret;
2139 struct btrfs_key key = {
2140 .objectid = objectid,
2141 .type = BTRFS_ROOT_ITEM_KEY,
2142 .offset = 0,
2143 };
2144 bool found = false;
2145
2146 /* If we have IGNOREDATACSUMS skip loading these roots. */
2147 if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2148 btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2149 set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2150 return 0;
2151 }
2152
2153 while (1) {
2154 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2155 if (ret < 0)
2156 break;
2157
2158 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2159 ret = btrfs_next_leaf(tree_root, path);
2160 if (ret) {
2161 if (ret > 0)
2162 ret = 0;
2163 break;
2164 }
2165 }
2166 ret = 0;
2167
2168 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2169 if (key.objectid != objectid)
2170 break;
2171 btrfs_release_path(path);
2172
2173 /*
2174 * Only track this for the extent tree, the number of global roots
2175 * is the same for every objectid.
2176 */
2177 if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2178 max_global_id = max(max_global_id, key.offset);
2179
2180 found = true;
2181 root = read_tree_root_path(tree_root, path, &key);
2182 if (IS_ERR(root)) {
2183 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2184 ret = PTR_ERR(root);
2185 break;
2186 }
2187 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2188 ret = btrfs_global_root_insert(root);
2189 if (ret) {
2190 btrfs_put_root(root);
2191 break;
2192 }
2193 key.offset++;
2194 }
2195 btrfs_release_path(path);
2196
2197 if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2198 fs_info->nr_global_roots = max_global_id + 1;
2199
2200 if (!found || ret) {
2201 if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2202 set_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
2203
2204 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2205 ret = ret ? ret : -ENOENT;
2206 else
2207 ret = 0;
2208 btrfs_err(fs_info, "failed to load root %s", name);
2209 }
2210 return ret;
2211}
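
/*
 * Illustrative note (not in the original source): with multiple global
 * roots, the root items iterated above are keyed as
 * (objectid, BTRFS_ROOT_ITEM_KEY, global_root_id), e.g.:
 *
 *   (BTRFS_EXTENT_TREE_OBJECTID, ROOT_ITEM, 0)
 *   (BTRFS_EXTENT_TREE_OBJECTID, ROOT_ITEM, 1)
 *   ...
 *
 * which is why nr_global_roots is derived as max_global_id + 1 from the
 * extent tree alone.
 */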
2212
2213static int load_global_roots(struct btrfs_root *tree_root)
2214{
2215 struct btrfs_path *path;
2216 int ret = 0;
2217
2218 path = btrfs_alloc_path();
2219 if (!path)
2220 return -ENOMEM;
2221
2222 ret = load_global_roots_objectid(tree_root, path,
2223 BTRFS_EXTENT_TREE_OBJECTID, "extent");
2224 if (ret)
2225 goto out;
2226 ret = load_global_roots_objectid(tree_root, path,
2227 BTRFS_CSUM_TREE_OBJECTID, "csum");
2228 if (ret)
2229 goto out;
2230 if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2231 goto out;
2232 ret = load_global_roots_objectid(tree_root, path,
2233 BTRFS_FREE_SPACE_TREE_OBJECTID,
2234 "free space");
2235out:
2236 btrfs_free_path(path);
2237 return ret;
2238}
2239
2240static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2241{
2242 struct btrfs_root *tree_root = fs_info->tree_root;
2243 struct btrfs_root *root;
2244 struct btrfs_key location;
2245 int ret;
2246
2247 ASSERT(fs_info->tree_root);
2248
2249 ret = load_global_roots(tree_root);
2250 if (ret)
2251 return ret;
2252
2253 location.type = BTRFS_ROOT_ITEM_KEY;
2254 location.offset = 0;
2255
2256 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2257 location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2258 root = btrfs_read_tree_root(tree_root, &location);
2259 if (IS_ERR(root)) {
2260 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2261 ret = PTR_ERR(root);
2262 goto out;
2263 }
2264 } else {
2265 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2266 fs_info->block_group_root = root;
2267 }
2268 }
2269
2270 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2271 root = btrfs_read_tree_root(tree_root, &location);
2272 if (IS_ERR(root)) {
2273 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2274 ret = PTR_ERR(root);
2275 goto out;
2276 }
2277 } else {
2278 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2279 fs_info->dev_root = root;
2280 }
2281 /* Initialize fs_info for all devices in any case */
2282 ret = btrfs_init_devices_late(fs_info);
2283 if (ret)
2284 goto out;
2285
2286 /*
2287 * This tree can share blocks with some other fs tree during relocation
2288 * and we need a proper setup by btrfs_get_fs_root
2289 */
2290 root = btrfs_get_fs_root(tree_root->fs_info,
2291 BTRFS_DATA_RELOC_TREE_OBJECTID, true);
2292 if (IS_ERR(root)) {
2293 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2294 ret = PTR_ERR(root);
2295 goto out;
2296 }
2297 } else {
2298 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2299 fs_info->data_reloc_root = root;
2300 }
2301
2302 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2303 root = btrfs_read_tree_root(tree_root, &location);
2304 if (!IS_ERR(root)) {
2305 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2306 fs_info->quota_root = root;
2307 }
2308
2309 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2310 root = btrfs_read_tree_root(tree_root, &location);
2311 if (IS_ERR(root)) {
2312 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2313 ret = PTR_ERR(root);
2314 if (ret != -ENOENT)
2315 goto out;
2316 }
2317 } else {
2318 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2319 fs_info->uuid_root = root;
2320 }
2321
2322 if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2323 location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2324 root = btrfs_read_tree_root(tree_root, &location);
2325 if (IS_ERR(root)) {
2326 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2327 ret = PTR_ERR(root);
2328 goto out;
2329 }
2330 } else {
2331 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2332 fs_info->stripe_root = root;
2333 }
2334 }
2335
2336 return 0;
2337out:
2338 btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2339 location.objectid, ret);
2340 return ret;
2341}
2342
2343/*
2344 * Real super block validation
2345 * NOTE: super csum type and incompat features will not be checked here.
2346 *
2347 * @sb: super block to check
2348 * @mirror_num: the super block number to check its bytenr:
2349 * 0 the primary (1st) sb
2350 * 1, 2 2nd and 3rd backup copy
2351 * -1 skip bytenr check
2352 */
2353int btrfs_validate_super(struct btrfs_fs_info *fs_info,
2354 struct btrfs_super_block *sb, int mirror_num)
2355{
2356 u64 nodesize = btrfs_super_nodesize(sb);
2357 u64 sectorsize = btrfs_super_sectorsize(sb);
2358 int ret = 0;
2359
2360 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2361 btrfs_err(fs_info, "no valid FS found");
2362 ret = -EINVAL;
2363 }
2364 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2365 btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2366 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2367 ret = -EINVAL;
2368 }
2369 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2370 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2371 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2372 ret = -EINVAL;
2373 }
2374 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2375 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2376 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2377 ret = -EINVAL;
2378 }
2379 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2380 btrfs_err(fs_info, "log_root level too big: %d >= %d",
2381 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2382 ret = -EINVAL;
2383 }
2384
2385 /*
2386 * Check sectorsize and nodesize first, other checks will need them.
2387 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2388 */
2389 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2390 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2391 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2392 ret = -EINVAL;
2393 }
2394
2395 /*
2396 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
2397 *
2398 * We can support 16K sectorsize with 64K page size without problem,
2399 * but such sectorsize/pagesize combination doesn't make much sense.
2400 * 4K will be our future standard, PAGE_SIZE is supported from the very
2401 * beginning.
2402 */
2403 if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
2404 btrfs_err(fs_info,
2405 "sectorsize %llu not yet supported for page size %lu",
2406 sectorsize, PAGE_SIZE);
2407 ret = -EINVAL;
2408 }
2409
2410 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2411 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2412 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2413 ret = -EINVAL;
2414 }
2415 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2416 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2417 le32_to_cpu(sb->__unused_leafsize), nodesize);
2418 ret = -EINVAL;
2419 }
2420
2421 /* Root alignment check */
2422 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2423 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2424 btrfs_super_root(sb));
2425 ret = -EINVAL;
2426 }
2427 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2428 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2429 btrfs_super_chunk_root(sb));
2430 ret = -EINVAL;
2431 }
2432 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2433 btrfs_warn(fs_info, "log_root block unaligned: %llu",
2434 btrfs_super_log_root(sb));
2435 ret = -EINVAL;
2436 }
2437
2438 if (!fs_info->fs_devices->temp_fsid &&
2439 memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
2440 btrfs_err(fs_info,
2441 "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2442 sb->fsid, fs_info->fs_devices->fsid);
2443 ret = -EINVAL;
2444 }
2445
2446 if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
2447 BTRFS_FSID_SIZE) != 0) {
2448 btrfs_err(fs_info,
2449"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2450 btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2451 ret = -EINVAL;
2452 }
2453
2454 if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
2455 BTRFS_FSID_SIZE) != 0) {
2456 btrfs_err(fs_info,
2457 "dev_item UUID does not match metadata fsid: %pU != %pU",
2458 fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2459 ret = -EINVAL;
2460 }
2461
2462 /*
2463 * Artificial requirement for block-group-tree to force newer features
2464 * (free-space-tree, no-holes) so the test matrix is smaller.
2465 */
2466 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2467 (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2468 !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2469 btrfs_err(fs_info,
2470 "block-group-tree feature requires fres-space-tree and no-holes");
2471 ret = -EINVAL;
2472 }
2473
2474 /*
2475 * Hint to catch really bogus numbers, bitflips and so on; more exact
2476 * checks are done later.
2477 */
2478 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2479 btrfs_err(fs_info, "bytes_used is too small %llu",
2480 btrfs_super_bytes_used(sb));
2481 ret = -EINVAL;
2482 }
2483 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2484 btrfs_err(fs_info, "invalid stripesize %u",
2485 btrfs_super_stripesize(sb));
2486 ret = -EINVAL;
2487 }
2488 if (btrfs_super_num_devices(sb) > (1UL << 31))
2489 btrfs_warn(fs_info, "suspicious number of devices: %llu",
2490 btrfs_super_num_devices(sb));
2491 if (btrfs_super_num_devices(sb) == 0) {
2492 btrfs_err(fs_info, "number of devices is 0");
2493 ret = -EINVAL;
2494 }
2495
2496 if (mirror_num >= 0 &&
2497 btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2498 btrfs_err(fs_info, "super offset mismatch %llu != %llu",
2499 btrfs_super_bytenr(sb), btrfs_sb_offset(mirror_num));
2500 ret = -EINVAL;
2501 }
2502
2503 /*
2504 * Obvious sys_chunk_array corruptions, it must hold at least one key
2505 * and one chunk
2506 */
2507 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2508 btrfs_err(fs_info, "system chunk array too big %u > %u",
2509 btrfs_super_sys_array_size(sb),
2510 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2511 ret = -EINVAL;
2512 }
2513 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2514 + sizeof(struct btrfs_chunk)) {
2515 btrfs_err(fs_info, "system chunk array too small %u < %zu",
2516 btrfs_super_sys_array_size(sb),
2517 sizeof(struct btrfs_disk_key)
2518 + sizeof(struct btrfs_chunk));
2519 ret = -EINVAL;
2520 }
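
/*
 * Illustrative note (not in the original source): assuming the usual
 * on-disk layout, sizeof(struct btrfs_disk_key) is 17 bytes and
 * sizeof(struct btrfs_chunk) (including one embedded stripe) is 80
 * bytes, so the minimum valid sys_chunk_array payload accepted above is
 * 97 bytes.
 */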
2521
2522 /*
2523 * The generation is a global counter, we'll trust it more than the others
2524 * but it's still possible that it's the one that's wrong.
2525 */
2526 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
2527 btrfs_warn(fs_info,
2528 "suspicious: generation < chunk_root_generation: %llu < %llu",
2529 btrfs_super_generation(sb),
2530 btrfs_super_chunk_root_generation(sb));
2531 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
2532 && btrfs_super_cache_generation(sb) != (u64)-1)
2533 btrfs_warn(fs_info,
2534 "suspicious: generation < cache_generation: %llu < %llu",
2535 btrfs_super_generation(sb),
2536 btrfs_super_cache_generation(sb));
2537
2538 return ret;
2539}
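
/*
 * Illustrative note (not in the original source): assuming the usual
 * btrfs_sb_offset() layout, the expected bytenr values checked above are:
 *
 *   mirror_num 0: 64KiB (BTRFS_SUPER_INFO_OFFSET)
 *   mirror_num 1: 64MiB
 *   mirror_num 2: 256GiB
 *
 * so a super block copy found at the wrong offset (e.g. in a device
 * image copied with an offset) is rejected.
 */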
2540
2541/*
2542 * Validation of super block at mount time.
2543 * Checks already done early at mount time, like the csum type and
2544 * incompat flags, will be skipped.
2545 */
2546static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2547{
2548 return btrfs_validate_super(fs_info, fs_info->super_copy, 0);
2549}
2550
2551/*
2552 * Validation of super block at write time.
2553 * Some checks like bytenr check will be skipped as their values will be
2554 * overwritten soon.
2555 * Extra checks like csum type and incompat flags will be done here.
2556 */
2557static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2558 struct btrfs_super_block *sb)
2559{
2560 int ret;
2561
2562 ret = btrfs_validate_super(fs_info, sb, -1);
2563 if (ret < 0)
2564 goto out;
2565 if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) {
2566 ret = -EUCLEAN;
2567 btrfs_err(fs_info, "invalid csum type, has %u want %u",
2568 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2569 goto out;
2570 }
2571 if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2572 ret = -EUCLEAN;
2573 btrfs_err(fs_info,
2574 "invalid incompat flags, has 0x%llx valid mask 0x%llx",
2575 btrfs_super_incompat_flags(sb),
2576 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2577 goto out;
2578 }
2579out:
2580 if (ret < 0)
2581 btrfs_err(fs_info,
2582 "super block corruption detected before writing it to disk");
2583 return ret;
2584}
2585
2586static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2587{
2588 struct btrfs_tree_parent_check check = {
2589 .level = level,
2590 .transid = gen,
2591 .owner_root = btrfs_root_id(root)
2592 };
2593 int ret = 0;
2594
2595 root->node = read_tree_block(root->fs_info, bytenr, &check);
2596 if (IS_ERR(root->node)) {
2597 ret = PTR_ERR(root->node);
2598 root->node = NULL;
2599 return ret;
2600 }
2601 if (!extent_buffer_uptodate(root->node)) {
2602 free_extent_buffer(root->node);
2603 root->node = NULL;
2604 return -EIO;
2605 }
2606
2607 btrfs_set_root_node(&root->root_item, root->node);
2608 root->commit_root = btrfs_root_node(root);
2609 btrfs_set_root_refs(&root->root_item, 1);
2610 return ret;
2611}
2612
2613static int load_important_roots(struct btrfs_fs_info *fs_info)
2614{
2615 struct btrfs_super_block *sb = fs_info->super_copy;
2616 u64 gen, bytenr;
2617 int level, ret;
2618
2619 bytenr = btrfs_super_root(sb);
2620 gen = btrfs_super_generation(sb);
2621 level = btrfs_super_root_level(sb);
2622 ret = load_super_root(fs_info->tree_root, bytenr, gen, level);
2623 if (ret) {
2624 btrfs_warn(fs_info, "couldn't read tree root");
2625 return ret;
2626 }
2627 return 0;
2628}
2629
2630static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2631{
2632 int backup_index = find_newest_super_backup(fs_info);
2633 struct btrfs_super_block *sb = fs_info->super_copy;
2634 struct btrfs_root *tree_root = fs_info->tree_root;
2635 bool handle_error = false;
2636 int ret = 0;
2637 int i;
2638
2639 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2640 if (handle_error) {
2641 if (!IS_ERR(tree_root->node))
2642 free_extent_buffer(tree_root->node);
2643 tree_root->node = NULL;
2644
2645 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2646 break;
2647
2648 free_root_pointers(fs_info, false);
2649
2650 /*
2651 * Don't use the log in recovery mode, it won't be
2652 * valid
2653 */
2654 btrfs_set_super_log_root(sb, 0);
2655
2656 btrfs_warn(fs_info, "trying to load backup root slot %d", i);
2657 ret = read_backup_root(fs_info, i);
2658 backup_index = ret;
2659 if (ret < 0)
2660 return ret;
2661 }
2662
2663 ret = load_important_roots(fs_info);
2664 if (ret) {
2665 handle_error = true;
2666 continue;
2667 }
2668
2669 /*
2670 * No need to hold btrfs_root::objectid_mutex since the fs
2671 * hasn't been fully initialised and we are the only user
2672 */
2673 ret = btrfs_init_root_free_objectid(tree_root);
2674 if (ret < 0) {
2675 handle_error = true;
2676 continue;
2677 }
2678
2679 ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2680
2681 ret = btrfs_read_roots(fs_info);
2682 if (ret < 0) {
2683 handle_error = true;
2684 continue;
2685 }
2686
2687 /* All successful */
2688 fs_info->generation = btrfs_header_generation(tree_root->node);
2689 btrfs_set_last_trans_committed(fs_info, fs_info->generation);
2690 fs_info->last_reloc_trans = 0;
2691
2692 /* Always begin writing backup roots after the one being used */
2693 if (backup_index < 0) {
2694 fs_info->backup_root_index = 0;
2695 } else {
2696 fs_info->backup_root_index = backup_index + 1;
2697 fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2698 }
2699 break;
2700 }
2701
2702 return ret;
2703}
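
/*
 * Illustrative note (not in the original source): continuing the example
 * from read_backup_root(), if the backup that was successfully loaded
 * sat in slot 2 with BTRFS_NUM_BACKUP_ROOTS == 4, backup_root_index
 * becomes (2 + 1) % 4 == 3, so the next commit overwrites the oldest
 * slot rather than the one we just recovered from.
 */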
2704
2705void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2706{
2707 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2708 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2709 INIT_LIST_HEAD(&fs_info->trans_list);
2710 INIT_LIST_HEAD(&fs_info->dead_roots);
2711 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2712 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2713 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2714 spin_lock_init(&fs_info->delalloc_root_lock);
2715 spin_lock_init(&fs_info->trans_lock);
2716 spin_lock_init(&fs_info->fs_roots_radix_lock);
2717 spin_lock_init(&fs_info->delayed_iput_lock);
2718 spin_lock_init(&fs_info->defrag_inodes_lock);
2719 spin_lock_init(&fs_info->super_lock);
2720 spin_lock_init(&fs_info->buffer_lock);
2721 spin_lock_init(&fs_info->unused_bgs_lock);
2722 spin_lock_init(&fs_info->treelog_bg_lock);
2723 spin_lock_init(&fs_info->zone_active_bgs_lock);
2724 spin_lock_init(&fs_info->relocation_bg_lock);
2725 rwlock_init(&fs_info->tree_mod_log_lock);
2726 rwlock_init(&fs_info->global_root_lock);
2727 mutex_init(&fs_info->unused_bg_unpin_mutex);
2728 mutex_init(&fs_info->reclaim_bgs_lock);
2729 mutex_init(&fs_info->reloc_mutex);
2730 mutex_init(&fs_info->delalloc_root_mutex);
2731 mutex_init(&fs_info->zoned_meta_io_lock);
2732 mutex_init(&fs_info->zoned_data_reloc_io_lock);
2733 seqlock_init(&fs_info->profiles_lock);
2734
2735 btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2736 btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2737 btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2738 btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2739 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2740 BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2741 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2742 BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2743 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2744 BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2745 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2746 BTRFS_LOCKDEP_TRANS_COMPLETED);
2747
2748 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2749 INIT_LIST_HEAD(&fs_info->space_info);
2750 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2751 INIT_LIST_HEAD(&fs_info->unused_bgs);
2752 INIT_LIST_HEAD(&fs_info->reclaim_bgs);
2753 INIT_LIST_HEAD(&fs_info->zone_active_bgs);
2754#ifdef CONFIG_BTRFS_DEBUG
2755 INIT_LIST_HEAD(&fs_info->allocated_roots);
2756 INIT_LIST_HEAD(&fs_info->allocated_ebs);
2757 spin_lock_init(&fs_info->eb_leak_lock);
2758#endif
2759 fs_info->mapping_tree = RB_ROOT_CACHED;
2760 rwlock_init(&fs_info->mapping_tree_lock);
2761 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2762 BTRFS_BLOCK_RSV_GLOBAL);
2763 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2764 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2765 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2766 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2767 BTRFS_BLOCK_RSV_DELOPS);
2768 btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2769 BTRFS_BLOCK_RSV_DELREFS);
2770
2771 atomic_set(&fs_info->async_delalloc_pages, 0);
2772 atomic_set(&fs_info->defrag_running, 0);
2773 atomic_set(&fs_info->nr_delayed_iputs, 0);
2774 atomic64_set(&fs_info->tree_mod_seq, 0);
2775 fs_info->global_root_tree = RB_ROOT;
2776 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2777 fs_info->metadata_ratio = 0;
2778 fs_info->defrag_inodes = RB_ROOT;
2779 atomic64_set(&fs_info->free_chunk_space, 0);
2780 fs_info->tree_mod_log = RB_ROOT;
2781 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2782 btrfs_init_ref_verify(fs_info);
2783
2784 fs_info->thread_pool_size = min_t(unsigned long,
2785 num_online_cpus() + 2, 8);
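
	/*
	 * Illustrative note (not in the original source): e.g. with 4 online
	 * CPUs this yields min(4 + 2, 8) == 6, and the pool size is capped
	 * at 8 on larger machines (unless overridden by the thread_pool=
	 * mount option).
	 */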
2786
2787 INIT_LIST_HEAD(&fs_info->ordered_roots);
2788 spin_lock_init(&fs_info->ordered_root_lock);
2789
2790 btrfs_init_scrub(fs_info);
2791 btrfs_init_balance(fs_info);
2792 btrfs_init_async_reclaim_work(fs_info);
2793
2794 rwlock_init(&fs_info->block_group_cache_lock);
2795 fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2796
2797 extent_io_tree_init(fs_info, &fs_info->excluded_extents,
2798 IO_TREE_FS_EXCLUDED_EXTENTS);
2799
2800 mutex_init(&fs_info->ordered_operations_mutex);
2801 mutex_init(&fs_info->tree_log_mutex);
2802 mutex_init(&fs_info->chunk_mutex);
2803 mutex_init(&fs_info->transaction_kthread_mutex);
2804 mutex_init(&fs_info->cleaner_mutex);
2805 mutex_init(&fs_info->ro_block_group_mutex);
2806 init_rwsem(&fs_info->commit_root_sem);
2807 init_rwsem(&fs_info->cleanup_work_sem);
2808 init_rwsem(&fs_info->subvol_sem);
2809 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2810
2811 btrfs_init_dev_replace_locks(fs_info);
2812 btrfs_init_qgroup(fs_info);
2813 btrfs_discard_init(fs_info);
2814
2815 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2816 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2817
2818 init_waitqueue_head(&fs_info->transaction_throttle);
2819 init_waitqueue_head(&fs_info->transaction_wait);
2820 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2821 init_waitqueue_head(&fs_info->async_submit_wait);
2822 init_waitqueue_head(&fs_info->delayed_iputs_wait);
2823
2824 /* Usable values until the real ones are cached from the superblock */
2825 fs_info->nodesize = 4096;
2826 fs_info->sectorsize = 4096;
2827 fs_info->sectorsize_bits = ilog2(4096);
2828 fs_info->stripesize = 4096;
2829
2830 /* Default compress algorithm when user does -o compress */
2831 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2832
2833 fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2834
2835 spin_lock_init(&fs_info->swapfile_pins_lock);
2836 fs_info->swapfile_pins = RB_ROOT;
2837
2838 fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2839 INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2840}
2841
2842static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2843{
2844 int ret;
2845
2846 fs_info->sb = sb;
2847 /* Temporary fixed values for block size until we read the superblock. */
2848 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2849 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2850
2851 ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2852 if (ret)
2853 return ret;
2854
2855 ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
2856 if (ret)
2857 return ret;
2858
2859 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2860 if (ret)
2861 return ret;
2862
2863 fs_info->dirty_metadata_batch = PAGE_SIZE *
2864 (1 + ilog2(nr_cpu_ids));
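
	/*
	 * Illustrative note (not in the original source): with 4KiB pages
	 * and nr_cpu_ids == 8 this is 4096 * (1 + ilog2(8)) == 16KiB, i.e.
	 * the per-cpu counter batch grows logarithmically with the CPU
	 * count.
	 */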
2865
2866 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2867 if (ret)
2868 return ret;
2869
2870 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2871 GFP_KERNEL);
2872 if (ret)
2873 return ret;
2874
2875 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2876 GFP_KERNEL);
2877 if (!fs_info->delayed_root)
2878 return -ENOMEM;
2879 btrfs_init_delayed_root(fs_info->delayed_root);
2880
2881 if (sb_rdonly(sb))
2882 set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
2883
2884 return btrfs_alloc_stripe_hash_table(fs_info);
2885}
2886
2887static int btrfs_uuid_rescan_kthread(void *data)
2888{
2889 struct btrfs_fs_info *fs_info = data;
2890 int ret;
2891
2892 /*
2893 * 1st step is to iterate through the existing UUID tree and
2894 * to delete all entries that contain outdated data.
2895 * 2nd step is to add all missing entries to the UUID tree.
2896 */
2897 ret = btrfs_uuid_tree_iterate(fs_info);
2898 if (ret < 0) {
2899 if (ret != -EINTR)
2900 btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2901 ret);
2902 up(&fs_info->uuid_tree_rescan_sem);
2903 return ret;
2904 }
2905 return btrfs_uuid_scan_kthread(data);
2906}
2907
2908static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2909{
2910 struct task_struct *task;
2911
2912 down(&fs_info->uuid_tree_rescan_sem);
2913 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2914 if (IS_ERR(task)) {
2915 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2916 btrfs_warn(fs_info, "failed to start uuid_rescan task");
2917 up(&fs_info->uuid_tree_rescan_sem);
2918 return PTR_ERR(task);
2919 }
2920
2921 return 0;
2922}
2923
2924static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2925{
2926 u64 root_objectid = 0;
2927 struct btrfs_root *gang[8];
2928 int i = 0;
2929 int err = 0;
2930 unsigned int ret = 0;
2931
2932 while (1) {
2933 spin_lock(&fs_info->fs_roots_radix_lock);
2934 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2935 (void **)gang, root_objectid,
2936 ARRAY_SIZE(gang));
2937 if (!ret) {
2938 spin_unlock(&fs_info->fs_roots_radix_lock);
2939 break;
2940 }
2941 root_objectid = btrfs_root_id(gang[ret - 1]) + 1;
2942
2943 for (i = 0; i < ret; i++) {
2944 /* Avoid grabbing roots in dead_roots. */
2945 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
2946 gang[i] = NULL;
2947 continue;
2948 }
2949 /* Grab all the search results for later use. */
2950 gang[i] = btrfs_grab_root(gang[i]);
2951 }
2952 spin_unlock(&fs_info->fs_roots_radix_lock);
2953
2954 for (i = 0; i < ret; i++) {
2955 if (!gang[i])
2956 continue;
2957 root_objectid = btrfs_root_id(gang[i]);
2958 err = btrfs_orphan_cleanup(gang[i]);
2959 if (err)
2960 goto out;
2961 btrfs_put_root(gang[i]);
2962 }
2963 root_objectid++;
2964 }
2965out:
2966 /* Release the uncleaned roots due to error. */
2967 for (; i < ret; i++) {
2968 if (gang[i])
2969 btrfs_put_root(gang[i]);
2970 }
2971 return err;
2972}
2973
2974/*
2975 * Mounting logic specific to read-write file systems. Shared by open_ctree
2976 * and btrfs_remount when remounting from read-only to read-write.
2977 */
2978int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
2979{
2980 int ret;
2981 const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
2982 bool rebuild_free_space_tree = false;
2983
2984 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2985 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2986 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2987 btrfs_warn(fs_info,
2988 "'clear_cache' option is ignored with extent tree v2");
2989 else
2990 rebuild_free_space_tree = true;
2991 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2992 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2993 btrfs_warn(fs_info, "free space tree is invalid");
2994 rebuild_free_space_tree = true;
2995 }
2996
2997 if (rebuild_free_space_tree) {
2998 btrfs_info(fs_info, "rebuilding free space tree");
2999 ret = btrfs_rebuild_free_space_tree(fs_info);
3000 if (ret) {
3001 btrfs_warn(fs_info,
3002 "failed to rebuild free space tree: %d", ret);
3003 goto out;
3004 }
3005 }
3006
3007 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3008 !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
3009 btrfs_info(fs_info, "disabling free space tree");
3010 ret = btrfs_delete_free_space_tree(fs_info);
3011 if (ret) {
3012 btrfs_warn(fs_info,
3013 "failed to disable free space tree: %d", ret);
3014 goto out;
3015 }
3016 }
3017
3018 /*
3019 * btrfs_find_orphan_roots() is responsible for finding all the dead
3020 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
3021 * them into the fs_info->fs_roots_radix tree. This must be done before
3022 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
3023 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
3024 * item before the root's tree is deleted - this means that if we unmount
3025 * or crash before the deletion completes, on the next mount we will not
3026 * delete what remains of the tree because the orphan item no longer
3027 * exists, which is what tells us we have a pending deletion.
3028 */
3029 ret = btrfs_find_orphan_roots(fs_info);
3030 if (ret)
3031 goto out;
3032
3033 ret = btrfs_cleanup_fs_roots(fs_info);
3034 if (ret)
3035 goto out;
3036
3037 down_read(&fs_info->cleanup_work_sem);
3038 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3039 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3040 up_read(&fs_info->cleanup_work_sem);
3041 goto out;
3042 }
3043 up_read(&fs_info->cleanup_work_sem);
3044
3045 mutex_lock(&fs_info->cleaner_mutex);
3046 ret = btrfs_recover_relocation(fs_info);
3047 mutex_unlock(&fs_info->cleaner_mutex);
3048 if (ret < 0) {
3049 btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3050 goto out;
3051 }
3052
3053 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3054 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3055 btrfs_info(fs_info, "creating free space tree");
3056 ret = btrfs_create_free_space_tree(fs_info);
3057 if (ret) {
3058 btrfs_warn(fs_info,
3059 "failed to create free space tree: %d", ret);
3060 goto out;
3061 }
3062 }
3063
3064 if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3065 ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt);
3066 if (ret)
3067 goto out;
3068 }
3069
3070 ret = btrfs_resume_balance_async(fs_info);
3071 if (ret)
3072 goto out;
3073
3074 ret = btrfs_resume_dev_replace_async(fs_info);
3075 if (ret) {
3076 btrfs_warn(fs_info, "failed to resume dev_replace");
3077 goto out;
3078 }
3079
3080 btrfs_qgroup_rescan_resume(fs_info);
3081
3082 if (!fs_info->uuid_root) {
3083 btrfs_info(fs_info, "creating UUID tree");
3084 ret = btrfs_create_uuid_tree(fs_info);
3085 if (ret) {
3086 btrfs_warn(fs_info,
3087 "failed to create the UUID tree %d", ret);
3088 goto out;
3089 }
3090 }
3091
3092out:
3093 return ret;
3094}
3095
3096/*
3097 * Do various sanity and dependency checks of different features.
3098 *
3099 * @is_rw_mount: If the mount is read-write.
3100 *
3101 * This is the place for less strict checks (like for subpage or artificial
3102 * feature dependencies).
3103 *
3104 * For strict checks or possible corruption detection, see
3105 * btrfs_validate_super().
3106 *
3107 * This should be called after btrfs_parse_options(), as some mount options
3108 * (space cache related) can modify on-disk format like free space tree and
3109 * screw up certain feature dependencies.
3110 */
3111int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3112{
3113 struct btrfs_super_block *disk_super = fs_info->super_copy;
3114 u64 incompat = btrfs_super_incompat_flags(disk_super);
3115 const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super);
3116 const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3117
3118 if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3119 btrfs_err(fs_info,
3120 "cannot mount because of unknown incompat features (0x%llx)",
3121 incompat);
3122 return -EINVAL;
3123 }
3124
3125 /* Runtime limitation for mixed block groups. */
3126 if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3127 (fs_info->sectorsize != fs_info->nodesize)) {
3128 btrfs_err(fs_info,
3129"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3130 fs_info->nodesize, fs_info->sectorsize);
3131 return -EINVAL;
3132 }
3133
3134 /* Mixed backref is an always-enabled feature. */
3135 incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3136
3137 /* Set compression related flags just in case. */
3138 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3139 incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3140 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3141 incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3142
3143 /*
3144 * An ancient flag, which should really be marked deprecated.
3145 * Such a runtime limitation doesn't really need an incompat flag.
3146 */
3147 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
3148 incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3149
3150 if (compat_ro_unsupp && is_rw_mount) {
3151 btrfs_err(fs_info,
3152 "cannot mount read-write because of unknown compat_ro features (0x%llx)",
3153 compat_ro);
3154 return -EINVAL;
3155 }
3156
3157 /*
3158 * We have unsupported RO compat features, although RO mounted, we
3159 * should not cause any metadata writes, including log replay.
3160 * Or we could screw up whatever the new feature requires.
3161 */
3162 if (compat_ro_unsupp && btrfs_super_log_root(disk_super) &&
3163 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3164 btrfs_err(fs_info,
3165"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3166 compat_ro);
3167 return -EINVAL;
3168 }
3169
3170 /*
3171 * Artificial limitations for block group tree, to force
3172 * block-group-tree to rely on no-holes and free-space-tree.
3173 */
3174 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3175 (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3176 !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3177 btrfs_err(fs_info,
3178"block-group-tree feature requires no-holes and free-space-tree features");
3179 return -EINVAL;
3180 }
3181
3182 /*
3183 * Subpage runtime limitation on v1 cache.
3184 *
3185 * V1 space cache still has some hard coded PAGE_SIZE usage, while
3186 * we're already defaulting to v2 cache, no need to bother v1 as it's
3187 * going to be deprecated anyway.
3188 */
3189 if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3190 btrfs_warn(fs_info,
3191 "v1 space cache is not supported for page size %lu with sectorsize %u",
3192 PAGE_SIZE, fs_info->sectorsize);
3193 return -EINVAL;
3194 }
3195
3196 /* This can be called by remount, we need to protect the super block. */
3197 spin_lock(&fs_info->super_lock);
3198 btrfs_set_super_incompat_flags(disk_super, incompat);
3199 spin_unlock(&fs_info->super_lock);
3200
3201 return 0;
3202}
3203
3204int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
3205 char *options)
3206{
3207 u32 sectorsize;
3208 u32 nodesize;
3209 u32 stripesize;
3210 u64 generation;
3211 u16 csum_type;
3212 struct btrfs_super_block *disk_super;
3213 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3214 struct btrfs_root *tree_root;
3215 struct btrfs_root *chunk_root;
3216 int ret;
3217 int level;
3218
3219 ret = init_mount_fs_info(fs_info, sb);
3220 if (ret)
3221 goto fail;
3222
3223 /* These need to be init'ed before we start creating inodes and such. */
3224 tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3225 GFP_KERNEL);
3226 fs_info->tree_root = tree_root;
3227 chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3228 GFP_KERNEL);
3229 fs_info->chunk_root = chunk_root;
3230 if (!tree_root || !chunk_root) {
3231 ret = -ENOMEM;
3232 goto fail;
3233 }
3234
3235 ret = btrfs_init_btree_inode(sb);
3236 if (ret)
3237 goto fail;
3238
3239 invalidate_bdev(fs_devices->latest_dev->bdev);
3240
3241 /*
3242 * Read super block and check the signature bytes only
3243 */
3244 disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
3245 if (IS_ERR(disk_super)) {
3246 ret = PTR_ERR(disk_super);
3247 goto fail_alloc;
3248 }
3249
3250 btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
3251 /*
3252 * Verify the checksum type first; if that or the checksum value is
3253 * corrupted, we'll find out.
3254 */
3255 csum_type = btrfs_super_csum_type(disk_super);
3256 if (!btrfs_supported_super_csum(csum_type)) {
3257 btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3258 csum_type);
3259 ret = -EINVAL;
3260 btrfs_release_disk_super(disk_super);
3261 goto fail_alloc;
3262 }
3263
3264 fs_info->csum_size = btrfs_super_csum_size(disk_super);
3265
3266 ret = btrfs_init_csum_hash(fs_info, csum_type);
3267 if (ret) {
3268 btrfs_release_disk_super(disk_super);
3269 goto fail_alloc;
3270 }
3271
3272 /*
3273 * We want to check superblock checksum, the type is stored inside.
3274 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3275 */
3276 if (btrfs_check_super_csum(fs_info, disk_super)) {
3277 btrfs_err(fs_info, "superblock checksum mismatch");
3278 ret = -EINVAL;
3279 btrfs_release_disk_super(disk_super);
3280 goto fail_alloc;
3281 }
3282
3283 /*
3284 * super_copy is zeroed at allocation time and we never touch the
3285 * following bytes up to INFO_SIZE; the checksum is calculated from
3286 * the whole block of INFO_SIZE.
3287 */
3288 memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3289 btrfs_release_disk_super(disk_super);
3290
3291 disk_super = fs_info->super_copy;
3292
3293 memcpy(fs_info->super_for_commit, fs_info->super_copy,
3294 sizeof(*fs_info->super_for_commit));
3295
3296 ret = btrfs_validate_mount_super(fs_info);
3297 if (ret) {
3298 btrfs_err(fs_info, "superblock contains fatal errors");
3299 ret = -EINVAL;
3300 goto fail_alloc;
3301 }
3302
3303 if (!btrfs_super_root(disk_super)) {
3304 btrfs_err(fs_info, "invalid superblock tree root bytenr");
3305 ret = -EINVAL;
3306 goto fail_alloc;
3307 }
3308
3309 /* check FS state, whether FS is broken. */
3310 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
3311 WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3312
3313 /* Set up fs_info before parsing mount options */
3314 nodesize = btrfs_super_nodesize(disk_super);
3315 sectorsize = btrfs_super_sectorsize(disk_super);
3316 stripesize = sectorsize;
3317 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3318 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3319
3320 fs_info->nodesize = nodesize;
3321 fs_info->sectorsize = sectorsize;
3322 fs_info->sectorsize_bits = ilog2(sectorsize);
3323 fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
3324 fs_info->stripesize = stripesize;
3325
3326 /*
3327 * Handle the space caching options appropriately now that we have the
3328 * super block loaded and validated.
3329 */
3330 btrfs_set_free_space_cache_settings(fs_info);
3331
3332 if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) {
3333 ret = -EINVAL;
3334 goto fail_alloc;
3335 }
3336
3337 ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
3338 if (ret < 0)
3339 goto fail_alloc;
3340
3341 /*
3342 * At this point our mount options are validated, if we set ->max_inline
3343 * to something non-standard make sure we truncate it to sectorsize.
3344 */
3345 fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize);
3346
3347 if (sectorsize < PAGE_SIZE) {
3348 struct btrfs_subpage_info *subpage_info;
3349
3350 btrfs_warn(fs_info,
3351 "read-write for sector size %u with page size %lu is experimental",
3352 sectorsize, PAGE_SIZE);
3353 subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
3354 if (!subpage_info) {
3355 ret = -ENOMEM;
3356 goto fail_alloc;
3357 }
3358 btrfs_init_subpage_info(subpage_info, sectorsize);
3359 fs_info->subpage_info = subpage_info;
3360 }
3361
3362 ret = btrfs_init_workqueues(fs_info);
3363 if (ret)
3364 goto fail_sb_buffer;
3365
3366 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
3367 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
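
	/*
	 * Illustrative note (not in the original source): assuming the VM
	 * default readahead of 128KiB, a two-device filesystem gets 256KiB
	 * here, and the max() then enforces a 4MiB floor regardless of the
	 * device count.
	 */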
3368
3369 /* Update the values for the current filesystem. */
3370 sb->s_blocksize = sectorsize;
3371 sb->s_blocksize_bits = blksize_bits(sectorsize);
3372 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3373
3374 mutex_lock(&fs_info->chunk_mutex);
3375 ret = btrfs_read_sys_array(fs_info);
3376 mutex_unlock(&fs_info->chunk_mutex);
3377 if (ret) {
3378 btrfs_err(fs_info, "failed to read the system array: %d", ret);
3379 goto fail_sb_buffer;
3380 }
3381
3382 generation = btrfs_super_chunk_root_generation(disk_super);
3383 level = btrfs_super_chunk_root_level(disk_super);
3384 ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super),
3385 generation, level);
3386 if (ret) {
3387 btrfs_err(fs_info, "failed to read chunk root");
3388 goto fail_tree_roots;
3389 }
3390
3391 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
3392 offsetof(struct btrfs_header, chunk_tree_uuid),
3393 BTRFS_UUID_SIZE);
3394
3395 ret = btrfs_read_chunk_tree(fs_info);
3396 if (ret) {
3397 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3398 goto fail_tree_roots;
3399 }
3400
3401 /*
3402 * At this point we know all the devices that make this filesystem,
3403 * including the seed devices but we don't know yet if the replace
3404 * target is required. So free devices that are not part of this
3405 * filesystem but skip the replace target device which is checked
3406 * below in btrfs_init_dev_replace().
3407 */
3408 btrfs_free_extra_devids(fs_devices);
3409 if (!fs_devices->latest_dev->bdev) {
3410 btrfs_err(fs_info, "failed to read devices");
3411 ret = -EIO;
3412 goto fail_tree_roots;
3413 }
3414
3415 ret = init_tree_roots(fs_info);
3416 if (ret)
3417 goto fail_tree_roots;
3418
3419 /*
3420 * Get zone type information of zoned block devices. This will also
3421 * handle emulation of a zoned filesystem if a regular device has the
3422 * zoned incompat feature flag set.
3423 */
3424 ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3425 if (ret) {
3426 btrfs_err(fs_info,
3427 "zoned: failed to read device zone info: %d", ret);
3428 goto fail_block_groups;
3429 }
3430
3431 /*
3432 * If we have a uuid root and we're not being told to rescan we need to
3433 * check the generation here so we can set the
3434 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
3435 * transaction during a balance or the log replay without updating the
3436 * uuid generation, and then if we crash we would rescan the uuid tree,
3437 * even though it was perfectly fine.
3438 */
3439 if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3440 fs_info->generation == btrfs_super_uuid_tree_generation(disk_super))
3441 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3442
3443 ret = btrfs_verify_dev_extents(fs_info);
3444 if (ret) {
3445 btrfs_err(fs_info,
3446 "failed to verify dev extents against chunks: %d",
3447 ret);
3448 goto fail_block_groups;
3449 }
3450 ret = btrfs_recover_balance(fs_info);
3451 if (ret) {
3452 btrfs_err(fs_info, "failed to recover balance: %d", ret);
3453 goto fail_block_groups;
3454 }
3455
3456 ret = btrfs_init_dev_stats(fs_info);
3457 if (ret) {
3458 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3459 goto fail_block_groups;
3460 }
3461
3462 ret = btrfs_init_dev_replace(fs_info);
3463 if (ret) {
3464 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3465 goto fail_block_groups;
3466 }
3467
3468 ret = btrfs_check_zoned_mode(fs_info);
3469 if (ret) {
3470 btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3471 ret);
3472 goto fail_block_groups;
3473 }
3474
3475 ret = btrfs_sysfs_add_fsid(fs_devices);
3476 if (ret) {
3477 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3478 ret);
3479 goto fail_block_groups;
3480 }
3481
3482 ret = btrfs_sysfs_add_mounted(fs_info);
3483 if (ret) {
3484 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3485 goto fail_fsdev_sysfs;
3486 }
3487
3488 ret = btrfs_init_space_info(fs_info);
3489 if (ret) {
3490 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3491 goto fail_sysfs;
3492 }
3493
3494 ret = btrfs_read_block_groups(fs_info);
3495 if (ret) {
3496 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3497 goto fail_sysfs;
3498 }
3499
3500 btrfs_free_zone_cache(fs_info);
3501
3502 btrfs_check_active_zone_reservation(fs_info);
3503
3504 if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3505 !btrfs_check_rw_degradable(fs_info, NULL)) {
3506 btrfs_warn(fs_info,
3507 "writable mount is not allowed due to too many missing devices");
3508 ret = -EINVAL;
3509 goto fail_sysfs;
3510 }
3511
3512 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3513 "btrfs-cleaner");
3514 if (IS_ERR(fs_info->cleaner_kthread)) {
3515 ret = PTR_ERR(fs_info->cleaner_kthread);
3516 goto fail_sysfs;
3517 }
3518
3519 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3520 tree_root,
3521 "btrfs-transaction");
3522 if (IS_ERR(fs_info->transaction_kthread)) {
3523 ret = PTR_ERR(fs_info->transaction_kthread);
3524 goto fail_cleaner;
3525 }
3526
3527 ret = btrfs_read_qgroup_config(fs_info);
3528 if (ret)
3529 goto fail_trans_kthread;
3530
3531 if (btrfs_build_ref_tree(fs_info))
3532 btrfs_err(fs_info, "couldn't build ref tree");
3533
3534 /* Do not make on-disk changes in a broken FS or when nologreplay is given. */
3535 if (btrfs_super_log_root(disk_super) != 0 &&
3536 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3537 btrfs_info(fs_info, "start tree-log replay");
3538 ret = btrfs_replay_log(fs_info, fs_devices);
3539 if (ret)
3540 goto fail_qgroup;
3541 }
3542
3543 fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
3544 if (IS_ERR(fs_info->fs_root)) {
3545 ret = PTR_ERR(fs_info->fs_root);
3546 btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3547 fs_info->fs_root = NULL;
3548 goto fail_qgroup;
3549 }
3550
3551 if (sb_rdonly(sb))
3552 return 0;
3553
3554 ret = btrfs_start_pre_rw_mount(fs_info);
3555 if (ret) {
3556 close_ctree(fs_info);
3557 return ret;
3558 }
3559 btrfs_discard_resume(fs_info);
3560
3561 if (fs_info->uuid_root &&
3562 (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3563 fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
3564 btrfs_info(fs_info, "checking UUID tree");
3565 ret = btrfs_check_uuid_tree(fs_info);
3566 if (ret) {
3567 btrfs_warn(fs_info,
3568 "failed to check the UUID tree: %d", ret);
3569 close_ctree(fs_info);
3570 return ret;
3571 }
3572 }
3573
3574 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
3575
3576 /* Kick the cleaner thread so it'll start deleting snapshots. */
3577 if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3578 wake_up_process(fs_info->cleaner_kthread);
3579
3580 return 0;
3581
3582fail_qgroup:
3583 btrfs_free_qgroup_config(fs_info);
3584fail_trans_kthread:
3585 kthread_stop(fs_info->transaction_kthread);
3586 btrfs_cleanup_transaction(fs_info);
3587 btrfs_free_fs_roots(fs_info);
3588fail_cleaner:
3589 kthread_stop(fs_info->cleaner_kthread);
3590
3591 /*
3592 * make sure we're done with the btree inode before we stop our
3593 * kthreads
3594 */
3595 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3596
3597fail_sysfs:
3598 btrfs_sysfs_remove_mounted(fs_info);
3599
3600fail_fsdev_sysfs:
3601 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3602
3603fail_block_groups:
3604 btrfs_put_block_group_cache(fs_info);
3605
3606fail_tree_roots:
3607 if (fs_info->data_reloc_root)
3608 btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
3609 free_root_pointers(fs_info, true);
3610 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3611
3612fail_sb_buffer:
3613 btrfs_stop_all_workers(fs_info);
3614 btrfs_free_block_groups(fs_info);
3615fail_alloc:
3616 btrfs_mapping_tree_free(fs_info);
3617
3618 iput(fs_info->btree_inode);
3619fail:
3620 btrfs_close_devices(fs_info->fs_devices);
3621 ASSERT(ret < 0);
3622 return ret;
3623}
3624ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
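/*
 * A quick illustration (a sketch, per Documentation/fault-injection): with
 * CONFIG_FUNCTION_ERROR_INJECTION and debugfs available, the annotation above
 * lets the fail_function framework force open_ctree() to fail, e.g.:
 *
 *	echo open_ctree > /sys/kernel/debug/fail_function/inject
 *	echo -12 > /sys/kernel/debug/fail_function/open_ctree/retval
 */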
3625
3626static void btrfs_end_super_write(struct bio *bio)
3627{
3628 struct btrfs_device *device = bio->bi_private;
3629 struct folio_iter fi;
3630
3631 bio_for_each_folio_all(fi, bio) {
3632 if (bio->bi_status) {
3633 btrfs_warn_rl_in_rcu(device->fs_info,
3634 "lost super block write due to IO error on %s (%d)",
3635 btrfs_dev_name(device),
3636 blk_status_to_errno(bio->bi_status));
3637 btrfs_dev_stat_inc_and_print(device,
3638 BTRFS_DEV_STAT_WRITE_ERRS);
3639 /* Ensure failure if the primary sb fails. */
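			/*
			 * Only the primary copy is submitted with REQ_FUA
			 * (see write_dev_supers()), so a FUA failure is by
			 * definition a primary failure; the large sentinel
			 * added here lets wait_dev_supers() treat it as
			 * fatal.
			 */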
3640 if (bio->bi_opf & REQ_FUA)
3641 atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
3642 &device->sb_write_errors);
3643 else
3644 atomic_inc(&device->sb_write_errors);
3645 }
3646 folio_unlock(fi.folio);
3647 folio_put(fi.folio);
3648 }
3649
3650 bio_put(bio);
3651}
3652
3653struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3654 int copy_num, bool drop_cache)
3655{
3656 struct btrfs_super_block *super;
3657 struct page *page;
3658 u64 bytenr, bytenr_orig;
3659 struct address_space *mapping = bdev->bd_mapping;
3660 int ret;
3661
3662 bytenr_orig = btrfs_sb_offset(copy_num);
3663 ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr);
3664 if (ret == -ENOENT)
3665 return ERR_PTR(-EINVAL);
3666 else if (ret)
3667 return ERR_PTR(ret);
3668
3669 if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
3670 return ERR_PTR(-EINVAL);
3671
3672 if (drop_cache) {
3673 /* This should only be called with the primary sb. */
3674 ASSERT(copy_num == 0);
3675
		/*
		 * Drop the page of the primary superblock, so that later
		 * reads will always come from the device.
		 */
3680 invalidate_inode_pages2_range(mapping,
3681 bytenr >> PAGE_SHIFT,
3682 (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
3683 }
3684
3685 page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
3686 if (IS_ERR(page))
3687 return ERR_CAST(page);
3688
3689 super = page_address(page);
3690 if (btrfs_super_magic(super) != BTRFS_MAGIC) {
3691 btrfs_release_disk_super(super);
3692 return ERR_PTR(-ENODATA);
3693 }
3694
3695 if (btrfs_super_bytenr(super) != bytenr_orig) {
3696 btrfs_release_disk_super(super);
3697 return ERR_PTR(-EINVAL);
3698 }
3699
3700 return super;
3701}
3702
3704struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3705{
3706 struct btrfs_super_block *super, *latest = NULL;
3707 int i;
3708 u64 transid = 0;
3709
	/*
	 * We would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
	 */
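	/*
	 * For reference, btrfs_sb_offset() places the copies at fixed
	 * offsets: copy 0 at 64KiB, copy 1 at 64MiB and copy 2 at 256GiB,
	 * so smaller devices simply hold fewer copies.
	 */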
3715 for (i = 0; i < 1; i++) {
3716 super = btrfs_read_dev_one_super(bdev, i, false);
3717 if (IS_ERR(super))
3718 continue;
3719
3720 if (!latest || btrfs_super_generation(super) > transid) {
3721 if (latest)
3722 btrfs_release_disk_super(super);
3723
3724 latest = super;
3725 transid = btrfs_super_generation(super);
3726 }
3727 }
3728
3729 return super;
3730}
3731
3732/*
 * Write the superblock @sb to @device. Do not wait for completion, all the
 * folios we use for writing are locked.
 *
 * Write @max_mirrors copies of the superblock, where 0 means all copies that
 * fit within the expected device size at commit time. Note that @max_mirrors
 * must be the same for the write and wait phases.
 *
 * Returns 0 if at least one copy was submitted successfully, -1 otherwise.
3741 */
3742static int write_dev_supers(struct btrfs_device *device,
3743 struct btrfs_super_block *sb, int max_mirrors)
3744{
3745 struct btrfs_fs_info *fs_info = device->fs_info;
3746 struct address_space *mapping = device->bdev->bd_mapping;
3747 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3748 int i;
3749 int ret;
3750 u64 bytenr, bytenr_orig;
3751
3752 atomic_set(&device->sb_write_errors, 0);
3753
3754 if (max_mirrors == 0)
3755 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3756
3757 shash->tfm = fs_info->csum_shash;
3758
3759 for (i = 0; i < max_mirrors; i++) {
3760 struct folio *folio;
3761 struct bio *bio;
3762 struct btrfs_super_block *disk_super;
3763 size_t offset;
3764
3765 bytenr_orig = btrfs_sb_offset(i);
3766 ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
3767 if (ret == -ENOENT) {
3768 continue;
3769 } else if (ret < 0) {
3770 btrfs_err(device->fs_info,
3771 "couldn't get super block location for mirror %d",
3772 i);
3773 atomic_inc(&device->sb_write_errors);
3774 continue;
3775 }
3776 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3777 device->commit_total_bytes)
3778 break;
3779
3780 btrfs_set_super_bytenr(sb, bytenr_orig);
3781
3782 crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
3783 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3784 sb->csum);
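		/*
		 * The checksum covers everything after the csum field, i.e.
		 * bytes [BTRFS_CSUM_SIZE, BTRFS_SUPER_INFO_SIZE) of the super
		 * block, and is stored in the block's first BTRFS_CSUM_SIZE
		 * bytes.
		 */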
3785
3786 folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
3787 FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
3788 GFP_NOFS);
3789 if (IS_ERR(folio)) {
3790 btrfs_err(device->fs_info,
3791 "couldn't get super block page for bytenr %llu",
3792 bytenr);
3793 atomic_inc(&device->sb_write_errors);
3794 continue;
3795 }
3796 ASSERT(folio_order(folio) == 0);
3797
3798 offset = offset_in_folio(folio, bytenr);
3799 disk_super = folio_address(folio) + offset;
3800 memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3801
3802 /*
3803 * Directly use bios here instead of relying on the page cache
3804 * to do I/O, so we don't lose the ability to do integrity
3805 * checking.
3806 */
3807 bio = bio_alloc(device->bdev, 1,
3808 REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3809 GFP_NOFS);
3810 bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3811 bio->bi_private = device;
3812 bio->bi_end_io = btrfs_end_super_write;
3813 bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
3814
3815 /*
3816 * We FUA only the first super block. The others we allow to
		 * go down lazily, and there's a short window where the on-disk
3818 * copies might still contain the older version.
3819 */
3820 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3821 bio->bi_opf |= REQ_FUA;
3822 submit_bio(bio);
3823
3824 if (btrfs_advance_sb_log(device, i))
3825 atomic_inc(&device->sb_write_errors);
3826 }
3827 return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
3828}
3829
3830/*
3831 * Wait for write completion of superblocks done by write_dev_supers,
3832 * @max_mirrors same for write and wait phases.
3833 *
3834 * Return -1 if primary super block write failed or when there were no super block
3835 * copies written. Otherwise 0.
3836 */
3837static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3838{
3839 int i;
3840 int errors = 0;
3841 bool primary_failed = false;
3842 int ret;
3843 u64 bytenr;
3844
3845 if (max_mirrors == 0)
3846 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3847
3848 for (i = 0; i < max_mirrors; i++) {
3849 struct folio *folio;
3850
3851 ret = btrfs_sb_log_location(device, i, READ, &bytenr);
3852 if (ret == -ENOENT) {
3853 break;
3854 } else if (ret < 0) {
3855 errors++;
3856 if (i == 0)
3857 primary_failed = true;
3858 continue;
3859 }
3860 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3861 device->commit_total_bytes)
3862 break;
3863
3864 folio = filemap_get_folio(device->bdev->bd_mapping,
3865 bytenr >> PAGE_SHIFT);
3866 /* If the folio has been removed, then we know it completed. */
3867 if (IS_ERR(folio))
3868 continue;
3869 ASSERT(folio_order(folio) == 0);
3870
3871 /* Folio will be unlocked once the write completes. */
3872 folio_wait_locked(folio);
3873 folio_put(folio);
3874 }
3875
3876 errors += atomic_read(&device->sb_write_errors);
3877 if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
3878 primary_failed = true;
3879 if (primary_failed) {
3880 btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3881 device->devid);
3882 return -1;
3883 }
3884
3885 return errors < i ? 0 : -1;
3886}
3887
3888/*
3889 * endio for the write_dev_flush, this will wake anyone waiting
3890 * for the barrier when it is done
3891 */
3892static void btrfs_end_empty_barrier(struct bio *bio)
3893{
3894 bio_uninit(bio);
3895 complete(bio->bi_private);
3896}
3897
3898/*
3899 * Submit a flush request to the device if it supports it. Error handling is
3900 * done in the waiting counterpart.
3901 */
3902static void write_dev_flush(struct btrfs_device *device)
3903{
3904 struct bio *bio = &device->flush_bio;
3905
3906 device->last_flush_error = BLK_STS_OK;
3907
3908 bio_init(bio, device->bdev, NULL, 0,
3909 REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3910 bio->bi_end_io = btrfs_end_empty_barrier;
3911 init_completion(&device->flush_wait);
3912 bio->bi_private = &device->flush_wait;
3913 submit_bio(bio);
3914 set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3915}
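/*
 * Note: a bio with no data and REQ_PREFLUSH set is the block layer idiom
 * for "flush the device's volatile write cache"; it completes once the
 * cached contents are on stable media.
 */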
3916
3917/*
3918 * If the flush bio has been submitted by write_dev_flush, wait for it.
3919 * Return true for any error, and false otherwise.
3920 */
3921static bool wait_dev_flush(struct btrfs_device *device)
3922{
3923 struct bio *bio = &device->flush_bio;
3924
3925 if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
3926 return false;
3927
3928 wait_for_completion_io(&device->flush_wait);
3929
3930 if (bio->bi_status) {
3931 device->last_flush_error = bio->bi_status;
3932 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS);
3933 return true;
3934 }
3935
3936 return false;
3937}
3938
3939/*
3940 * send an empty flush down to each device in parallel,
3941 * then wait for them
3942 */
3943static int barrier_all_devices(struct btrfs_fs_info *info)
3944{
3945 struct list_head *head;
3946 struct btrfs_device *dev;
3947 int errors_wait = 0;
3948
3949 lockdep_assert_held(&info->fs_devices->device_list_mutex);
3950 /* send down all the barriers */
3951 head = &info->fs_devices->devices;
3952 list_for_each_entry(dev, head, dev_list) {
3953 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3954 continue;
3955 if (!dev->bdev)
3956 continue;
3957 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3958 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3959 continue;
3960
3961 write_dev_flush(dev);
3962 }
3963
3964 /* wait for all the barriers */
3965 list_for_each_entry(dev, head, dev_list) {
3966 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3967 continue;
3968 if (!dev->bdev) {
3969 errors_wait++;
3970 continue;
3971 }
3972 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3973 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3974 continue;
3975
3976 if (wait_dev_flush(dev))
3977 errors_wait++;
3978 }
3979
	/*
	 * btrfs_check_rw_degradable() checks each device's last_flush_error
	 * to determine whether the filesystem is still writeable in a
	 * degraded state.
	 */
3984 if (errors_wait && !btrfs_check_rw_degradable(info, NULL))
3985 return -EIO;
3986
3987 return 0;
3988}
3989
3990int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3991{
3992 int raid_type;
3993 int min_tolerated = INT_MAX;
3994
3995 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3996 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3997 min_tolerated = min_t(int, min_tolerated,
3998 btrfs_raid_array[BTRFS_RAID_SINGLE].
3999 tolerated_failures);
4000
4001 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4002 if (raid_type == BTRFS_RAID_SINGLE)
4003 continue;
4004 if (!(flags & btrfs_raid_array[raid_type].bg_flag))
4005 continue;
4006 min_tolerated = min_t(int, min_tolerated,
4007 btrfs_raid_array[raid_type].
4008 tolerated_failures);
4009 }
4010
4011 if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
4013 min_tolerated = 0;
4014 }
4015
4016 return min_tolerated;
4017}
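/*
 * For example, RAID1 and RAID10 tolerate one failed device, RAID1C3 and
 * RAID6 tolerate two, while SINGLE, DUP and RAID0 tolerate none; the
 * result is the minimum across the profiles present in @flags.
 */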
4018
4019int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4020{
4021 struct list_head *head;
4022 struct btrfs_device *dev;
4023 struct btrfs_super_block *sb;
4024 struct btrfs_dev_item *dev_item;
4025 int ret;
4026 int do_barriers;
4027 int max_errors;
4028 int total_errors = 0;
4029 u64 flags;
4030
4031 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4032
	/*
	 * max_mirrors == 0 indicates we're called from commit_transaction,
	 * not from fsync, where the tree roots in fs_info are not yet
	 * consistent on disk.
	 */
4038 if (max_mirrors == 0)
4039 backup_super_roots(fs_info);
4040
4041 sb = fs_info->super_for_commit;
4042 dev_item = &sb->dev_item;
4043
4044 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4045 head = &fs_info->fs_devices->devices;
4046 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
4047
4048 if (do_barriers) {
4049 ret = barrier_all_devices(fs_info);
4050 if (ret) {
4051 mutex_unlock(
4052 &fs_info->fs_devices->device_list_mutex);
4053 btrfs_handle_fs_error(fs_info, ret,
4054 "errors while submitting device barriers.");
4055 return ret;
4056 }
4057 }
4058
4059 list_for_each_entry(dev, head, dev_list) {
4060 if (!dev->bdev) {
4061 total_errors++;
4062 continue;
4063 }
4064 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4065 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4066 continue;
4067
4068 btrfs_set_stack_device_generation(dev_item, 0);
4069 btrfs_set_stack_device_type(dev_item, dev->type);
4070 btrfs_set_stack_device_id(dev_item, dev->devid);
4071 btrfs_set_stack_device_total_bytes(dev_item,
4072 dev->commit_total_bytes);
4073 btrfs_set_stack_device_bytes_used(dev_item,
4074 dev->commit_bytes_used);
4075 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
4076 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
4077 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
4078 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4079 memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4080 BTRFS_FSID_SIZE);
4081
4082 flags = btrfs_super_flags(sb);
4083 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
4084
4085 ret = btrfs_validate_write_super(fs_info, sb);
4086 if (ret < 0) {
4087 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4088 btrfs_handle_fs_error(fs_info, -EUCLEAN,
4089 "unexpected superblock corruption detected");
4090 return -EUCLEAN;
4091 }
4092
4093 ret = write_dev_supers(dev, sb, max_mirrors);
4094 if (ret)
4095 total_errors++;
4096 }
4097 if (total_errors > max_errors) {
4098 btrfs_err(fs_info, "%d errors while writing supers",
4099 total_errors);
4100 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4101
4102 /* FUA is masked off if unsupported and can't be the reason */
4103 btrfs_handle_fs_error(fs_info, -EIO,
4104 "%d errors while writing supers",
4105 total_errors);
4106 return -EIO;
4107 }
4108
4109 total_errors = 0;
4110 list_for_each_entry(dev, head, dev_list) {
4111 if (!dev->bdev)
4112 continue;
4113 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4114 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4115 continue;
4116
4117 ret = wait_dev_supers(dev, max_mirrors);
4118 if (ret)
4119 total_errors++;
4120 }
4121 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4122 if (total_errors > max_errors) {
4123 btrfs_handle_fs_error(fs_info, -EIO,
4124 "%d errors while writing supers",
4125 total_errors);
4126 return -EIO;
4127 }
4128 return 0;
4129}
4130
4131/* Drop a fs root from the radix tree and free it. */
4132void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4133 struct btrfs_root *root)
4134{
4135 bool drop_ref = false;
4136
4137 spin_lock(&fs_info->fs_roots_radix_lock);
4138 radix_tree_delete(&fs_info->fs_roots_radix,
4139 (unsigned long)btrfs_root_id(root));
4140 if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
4141 drop_ref = true;
4142 spin_unlock(&fs_info->fs_roots_radix_lock);
4143
4144 if (BTRFS_FS_ERROR(fs_info)) {
4145 ASSERT(root->log_root == NULL);
4146 if (root->reloc_root) {
4147 btrfs_put_root(root->reloc_root);
4148 root->reloc_root = NULL;
4149 }
4150 }
4151
4152 if (drop_ref)
4153 btrfs_put_root(root);
4154}
4155
4156int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4157{
4158 struct btrfs_root *root = fs_info->tree_root;
4159 struct btrfs_trans_handle *trans;
4160
4161 mutex_lock(&fs_info->cleaner_mutex);
4162 btrfs_run_delayed_iputs(fs_info);
4163 mutex_unlock(&fs_info->cleaner_mutex);
4164 wake_up_process(fs_info->cleaner_kthread);
4165
	/* Wait until ongoing cleanup work is done. */
4167 down_write(&fs_info->cleanup_work_sem);
4168 up_write(&fs_info->cleanup_work_sem);
4169
4170 trans = btrfs_join_transaction(root);
4171 if (IS_ERR(trans))
4172 return PTR_ERR(trans);
4173 return btrfs_commit_transaction(trans);
4174}
4175
4176static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4177{
4178 struct btrfs_transaction *trans;
4179 struct btrfs_transaction *tmp;
4180 bool found = false;
4181
	/*
	 * This function is only called at the very end of close_ctree(),
	 * thus there is no running transaction and no need to take
	 * trans_lock.
	 */
4186 ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4187 list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4188 struct extent_state *cached = NULL;
4189 u64 dirty_bytes = 0;
4190 u64 cur = 0;
4191 u64 found_start;
4192 u64 found_end;
4193
4194 found = true;
4195 while (find_first_extent_bit(&trans->dirty_pages, cur,
4196 &found_start, &found_end, EXTENT_DIRTY, &cached)) {
4197 dirty_bytes += found_end + 1 - found_start;
4198 cur = found_end + 1;
4199 }
4200 btrfs_warn(fs_info,
4201 "transaction %llu (with %llu dirty metadata bytes) is not committed",
4202 trans->transid, dirty_bytes);
4203 btrfs_cleanup_one_transaction(trans, fs_info);
4204
4205 if (trans == fs_info->running_transaction)
4206 fs_info->running_transaction = NULL;
4207 list_del_init(&trans->list);
4208
4209 btrfs_put_transaction(trans);
4210 trace_btrfs_transaction_commit(fs_info);
4211 }
4212 ASSERT(!found);
4213}
4214
4215void __cold close_ctree(struct btrfs_fs_info *fs_info)
4216{
4217 int ret;
4218
4219 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
4220
4221 /*
4222 * If we had UNFINISHED_DROPS we could still be processing them, so
4223 * clear that bit and wake up relocation so it can stop.
4224 * We must do this before stopping the block group reclaim task, because
4225 * at btrfs_relocate_block_group() we wait for this bit, and after the
4226 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4227 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4228 * return 1.
4229 */
4230 btrfs_wake_unfinished_drop(fs_info);
4231
4232 /*
4233 * We may have the reclaim task running and relocating a data block group,
4234 * in which case it may create delayed iputs. So stop it before we park
4235 * the cleaner kthread otherwise we can get new delayed iputs after
4236 * parking the cleaner, and that can make the async reclaim task to hang
4237 * if it's waiting for delayed iputs to complete, since the cleaner is
4238 * parked and can not run delayed iputs - this will make us hang when
4239 * trying to stop the async reclaim task.
4240 */
4241 cancel_work_sync(&fs_info->reclaim_bgs_work);
4242 /*
4243 * We don't want the cleaner to start new transactions, add more delayed
4244 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4245 * because that frees the task_struct, and the transaction kthread might
4246 * still try to wake up the cleaner.
4247 */
4248 kthread_park(fs_info->cleaner_kthread);
4249
4250 /* wait for the qgroup rescan worker to stop */
4251 btrfs_qgroup_wait_for_completion(fs_info, false);
4252
4253 /* wait for the uuid_scan task to finish */
4254 down(&fs_info->uuid_tree_rescan_sem);
	/* Avoid complaints from lockdep et al., set sem back to initial state. */
4256 up(&fs_info->uuid_tree_rescan_sem);
4257
4258 /* pause restriper - we want to resume on mount */
4259 btrfs_pause_balance(fs_info);
4260
4261 btrfs_dev_replace_suspend_for_unmount(fs_info);
4262
4263 btrfs_scrub_cancel(fs_info);
4264
4265 /* wait for any defraggers to finish */
4266 wait_event(fs_info->transaction_wait,
4267 (atomic_read(&fs_info->defrag_running) == 0));
4268
4269 /* clear out the rbtree of defraggable inodes */
4270 btrfs_cleanup_defrag_inodes(fs_info);
4271
4272 /*
4273 * After we parked the cleaner kthread, ordered extents may have
4274 * completed and created new delayed iputs. If one of the async reclaim
4275 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4276 * can hang forever trying to stop it, because if a delayed iput is
4277 * added after it ran btrfs_run_delayed_iputs() and before it called
4278 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4279 * no one else to run iputs.
4280 *
4281 * So wait for all ongoing ordered extents to complete and then run
4282 * delayed iputs. This works because once we reach this point no one
4283 * can either create new ordered extents nor create delayed iputs
4284 * through some other means.
4285 *
4286 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4287 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4288 * but the delayed iput for the respective inode is made only when doing
4289 * the final btrfs_put_ordered_extent() (which must happen at
4290 * btrfs_finish_ordered_io() when we are unmounting).
4291 */
4292 btrfs_flush_workqueue(fs_info->endio_write_workers);
4293 /* Ordered extents for free space inodes. */
4294 btrfs_flush_workqueue(fs_info->endio_freespace_worker);
4295 btrfs_run_delayed_iputs(fs_info);
4296
4297 cancel_work_sync(&fs_info->async_reclaim_work);
4298 cancel_work_sync(&fs_info->async_data_reclaim_work);
4299 cancel_work_sync(&fs_info->preempt_reclaim_work);
4300
4301 /* Cancel or finish ongoing discard work */
4302 btrfs_discard_cleanup(fs_info);
4303
4304 if (!sb_rdonly(fs_info->sb)) {
4305 /*
4306 * The cleaner kthread is stopped, so do one final pass over
4307 * unused block groups.
4308 */
4309 btrfs_delete_unused_bgs(fs_info);
4310
4311 /*
4312 * There might be existing delayed inode workers still running
4313 * and holding an empty delayed inode item. We must wait for
4314 * them to complete first because they can create a transaction.
4315 * This happens when someone calls btrfs_balance_delayed_items()
4316 * and then a transaction commit runs the same delayed nodes
4317 * before any delayed worker has done something with the nodes.
4318 * We must wait for any worker here and not at transaction
4319 * commit time since that could cause a deadlock.
4320 * This is a very rare case.
4321 */
4322 btrfs_flush_workqueue(fs_info->delayed_workers);
4323
4324 ret = btrfs_commit_super(fs_info);
4325 if (ret)
4326 btrfs_err(fs_info, "commit super ret %d", ret);
4327 }
4328
4329 if (BTRFS_FS_ERROR(fs_info))
4330 btrfs_error_commit_super(fs_info);
4331
4332 kthread_stop(fs_info->transaction_kthread);
4333 kthread_stop(fs_info->cleaner_kthread);
4334
4335 ASSERT(list_empty(&fs_info->delayed_iputs));
4336 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4337
4338 if (btrfs_check_quota_leak(fs_info)) {
4339 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4340 btrfs_err(fs_info, "qgroup reserved space leaked");
4341 }
4342
4343 btrfs_free_qgroup_config(fs_info);
4344 ASSERT(list_empty(&fs_info->delalloc_roots));
4345
4346 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4347 btrfs_info(fs_info, "at unmount delalloc count %lld",
4348 percpu_counter_sum(&fs_info->delalloc_bytes));
4349 }
4350
4351 if (percpu_counter_sum(&fs_info->ordered_bytes))
4352 btrfs_info(fs_info, "at unmount dio bytes count %lld",
4353 percpu_counter_sum(&fs_info->ordered_bytes));
4354
4355 btrfs_sysfs_remove_mounted(fs_info);
4356 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4357
4358 btrfs_put_block_group_cache(fs_info);
4359
	/*
	 * We must make sure there is no read request that can be
	 * submitted after we stop all the workers.
	 */
4364 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4365 btrfs_stop_all_workers(fs_info);
4366
4367 /* We shouldn't have any transaction open at this point */
4368 warn_about_uncommitted_trans(fs_info);
4369
4370 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4371 free_root_pointers(fs_info, true);
4372 btrfs_free_fs_roots(fs_info);
4373
4374 /*
4375 * We must free the block groups after dropping the fs_roots as we could
4376 * have had an IO error and have left over tree log blocks that aren't
4377 * cleaned up until the fs roots are freed. This makes the block group
4378 * accounting appear to be wrong because there's pending reserved bytes,
4379 * so make sure we do the block group cleanup afterwards.
4380 */
4381 btrfs_free_block_groups(fs_info);
4382
4383 iput(fs_info->btree_inode);
4384
4385 btrfs_mapping_tree_free(fs_info);
4386 btrfs_close_devices(fs_info->fs_devices);
4387}
4388
4389void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4390 struct extent_buffer *buf)
4391{
4392 struct btrfs_fs_info *fs_info = buf->fs_info;
4393 u64 transid = btrfs_header_generation(buf);
4394
4395#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4396 /*
4397 * This is a fast path so only do this check if we have sanity tests
4398 * enabled. Normal people shouldn't be using unmapped buffers as dirty
4399 * outside of the sanity tests.
4400 */
4401 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4402 return;
4403#endif
4404 /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4405 ASSERT(trans->transid == fs_info->generation);
4406 btrfs_assert_tree_write_locked(buf);
4407 if (unlikely(transid != fs_info->generation)) {
4408 btrfs_abort_transaction(trans, -EUCLEAN);
4409 btrfs_crit(fs_info,
4410"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
4411 buf->start, transid, fs_info->generation);
4412 }
4413 set_extent_buffer_dirty(buf);
4414}
4415
4416static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4417 int flush_delayed)
4418{
	/*
	 * Looks as though older kernels can get into trouble with this code;
	 * they end up stuck in balance_dirty_pages() forever.
	 */
4423 int ret;
4424
4425 if (current->flags & PF_MEMALLOC)
4426 return;
4427
4428 if (flush_delayed)
4429 btrfs_balance_delayed_items(fs_info);
4430
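	/*
	 * Only involve the flusher once dirty btree metadata crosses
	 * BTRFS_DIRTY_METADATA_THRESH (32M at the time of writing); the
	 * batched percpu compare keeps this fast-path check cheap.
	 */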
4431 ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4432 BTRFS_DIRTY_METADATA_THRESH,
4433 fs_info->dirty_metadata_batch);
4434 if (ret > 0) {
4435 balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
4436 }
4437}
4438
4439void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4440{
4441 __btrfs_btree_balance_dirty(fs_info, 1);
4442}
4443
4444void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4445{
4446 __btrfs_btree_balance_dirty(fs_info, 0);
4447}
4448
4449static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4450{
4451 /* cleanup FS via transaction */
4452 btrfs_cleanup_transaction(fs_info);
4453
4454 mutex_lock(&fs_info->cleaner_mutex);
4455 btrfs_run_delayed_iputs(fs_info);
4456 mutex_unlock(&fs_info->cleaner_mutex);
4457
4458 down_write(&fs_info->cleanup_work_sem);
4459 up_write(&fs_info->cleanup_work_sem);
4460}
4461
4462static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4463{
4464 struct btrfs_root *gang[8];
4465 u64 root_objectid = 0;
4466 int ret;
4467
4468 spin_lock(&fs_info->fs_roots_radix_lock);
4469 while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4470 (void **)gang, root_objectid,
4471 ARRAY_SIZE(gang))) != 0) {
4472 int i;
4473
4474 for (i = 0; i < ret; i++)
4475 gang[i] = btrfs_grab_root(gang[i]);
4476 spin_unlock(&fs_info->fs_roots_radix_lock);
4477
4478 for (i = 0; i < ret; i++) {
4479 if (!gang[i])
4480 continue;
4481 root_objectid = btrfs_root_id(gang[i]);
4482 btrfs_free_log(NULL, gang[i]);
4483 btrfs_put_root(gang[i]);
4484 }
4485 root_objectid++;
4486 spin_lock(&fs_info->fs_roots_radix_lock);
4487 }
4488 spin_unlock(&fs_info->fs_roots_radix_lock);
4489 btrfs_free_log_root_tree(NULL, fs_info);
4490}
4491
4492static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4493{
4494 struct btrfs_ordered_extent *ordered;
4495
4496 spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion path, which
	 * will make sure the ordered extent gets properly cleaned up.
	 */
4501 list_for_each_entry(ordered, &root->ordered_extents,
4502 root_extent_list)
4503 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4504 spin_unlock(&root->ordered_extent_lock);
4505}
4506
4507static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4508{
4509 struct btrfs_root *root;
4510 LIST_HEAD(splice);
4511
4512 spin_lock(&fs_info->ordered_root_lock);
4513 list_splice_init(&fs_info->ordered_roots, &splice);
4514 while (!list_empty(&splice)) {
4515 root = list_first_entry(&splice, struct btrfs_root,
4516 ordered_root);
4517 list_move_tail(&root->ordered_root,
4518 &fs_info->ordered_roots);
4519
4520 spin_unlock(&fs_info->ordered_root_lock);
4521 btrfs_destroy_ordered_extents(root);
4522
4523 cond_resched();
4524 spin_lock(&fs_info->ordered_root_lock);
4525 }
4526 spin_unlock(&fs_info->ordered_root_lock);
4527
	/*
	 * We need this here because if we've been flipped read-only we won't
	 * get sync() from the umount, so we need to make sure any ordered
	 * extents that haven't started writeout of their dirty pages yet
	 * actually get run and error out properly.
	 */
4534 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4535}
4536
4537static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4538 struct btrfs_fs_info *fs_info)
4539{
4540 struct rb_node *node;
4541 struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
4542 struct btrfs_delayed_ref_node *ref;
4543
4544 spin_lock(&delayed_refs->lock);
4545 while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4546 struct btrfs_delayed_ref_head *head;
4547 struct rb_node *n;
4548 bool pin_bytes = false;
4549
4550 head = rb_entry(node, struct btrfs_delayed_ref_head,
4551 href_node);
4552 if (btrfs_delayed_ref_lock(delayed_refs, head))
4553 continue;
4554
4555 spin_lock(&head->lock);
4556 while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4557 ref = rb_entry(n, struct btrfs_delayed_ref_node,
4558 ref_node);
4559 rb_erase_cached(&ref->ref_node, &head->ref_tree);
4560 RB_CLEAR_NODE(&ref->ref_node);
4561 if (!list_empty(&ref->add_list))
4562 list_del(&ref->add_list);
4563 atomic_dec(&delayed_refs->num_entries);
4564 btrfs_put_delayed_ref(ref);
4565 btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
4566 }
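		/*
		 * must_insert_reserved means the extent was reserved at
		 * allocation but its head never ran; pin the bytes below so
		 * the error-unpin path can release them and keep the space
		 * accounting consistent.
		 */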
4567 if (head->must_insert_reserved)
4568 pin_bytes = true;
4569 btrfs_free_delayed_extent_op(head->extent_op);
4570 btrfs_delete_ref_head(delayed_refs, head);
4571 spin_unlock(&head->lock);
4572 spin_unlock(&delayed_refs->lock);
4573 mutex_unlock(&head->mutex);
4574
4575 if (pin_bytes) {
4576 struct btrfs_block_group *cache;
4577
4578 cache = btrfs_lookup_block_group(fs_info, head->bytenr);
4579 BUG_ON(!cache);
4580
4581 spin_lock(&cache->space_info->lock);
4582 spin_lock(&cache->lock);
4583 cache->pinned += head->num_bytes;
4584 btrfs_space_info_update_bytes_pinned(fs_info,
4585 cache->space_info, head->num_bytes);
4586 cache->reserved -= head->num_bytes;
4587 cache->space_info->bytes_reserved -= head->num_bytes;
4588 spin_unlock(&cache->lock);
4589 spin_unlock(&cache->space_info->lock);
4590
4591 btrfs_put_block_group(cache);
4592
4593 btrfs_error_unpin_extent_range(fs_info, head->bytenr,
4594 head->bytenr + head->num_bytes - 1);
4595 }
4596 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4597 btrfs_put_delayed_ref_head(head);
4598 cond_resched();
4599 spin_lock(&delayed_refs->lock);
4600 }
4601 btrfs_qgroup_destroy_extent_records(trans);
4602
4603 spin_unlock(&delayed_refs->lock);
4604}
4605
4606static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4607{
4608 struct btrfs_inode *btrfs_inode;
4609 LIST_HEAD(splice);
4610
4611 spin_lock(&root->delalloc_lock);
4612 list_splice_init(&root->delalloc_inodes, &splice);
4613
4614 while (!list_empty(&splice)) {
		struct inode *inode = NULL;

4616 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4617 delalloc_inodes);
4618 btrfs_del_delalloc_inode(btrfs_inode);
4619 spin_unlock(&root->delalloc_lock);
4620
4621 /*
4622 * Make sure we get a live inode and that it'll not disappear
4623 * meanwhile.
4624 */
4625 inode = igrab(&btrfs_inode->vfs_inode);
4626 if (inode) {
4627 unsigned int nofs_flag;
4628
4629 nofs_flag = memalloc_nofs_save();
4630 invalidate_inode_pages2(inode->i_mapping);
4631 memalloc_nofs_restore(nofs_flag);
4632 iput(inode);
4633 }
4634 spin_lock(&root->delalloc_lock);
4635 }
4636 spin_unlock(&root->delalloc_lock);
4637}
4638
4639static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4640{
4641 struct btrfs_root *root;
4642 LIST_HEAD(splice);
4643
4644 spin_lock(&fs_info->delalloc_root_lock);
4645 list_splice_init(&fs_info->delalloc_roots, &splice);
4646 while (!list_empty(&splice)) {
4647 root = list_first_entry(&splice, struct btrfs_root,
4648 delalloc_root);
4649 root = btrfs_grab_root(root);
4650 BUG_ON(!root);
4651 spin_unlock(&fs_info->delalloc_root_lock);
4652
4653 btrfs_destroy_delalloc_inodes(root);
4654 btrfs_put_root(root);
4655
4656 spin_lock(&fs_info->delalloc_root_lock);
4657 }
4658 spin_unlock(&fs_info->delalloc_root_lock);
4659}
4660
4661static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4662 struct extent_io_tree *dirty_pages,
4663 int mark)
4664{
4665 struct extent_buffer *eb;
4666 u64 start = 0;
4667 u64 end;
4668
4669 while (find_first_extent_bit(dirty_pages, start, &start, &end,
4670 mark, NULL)) {
4671 clear_extent_bits(dirty_pages, start, end, mark);
4672 while (start <= end) {
4673 eb = find_extent_buffer(fs_info, start);
4674 start += fs_info->nodesize;
4675 if (!eb)
4676 continue;
4677
4678 btrfs_tree_lock(eb);
4679 wait_on_extent_buffer_writeback(eb);
4680 btrfs_clear_buffer_dirty(NULL, eb);
4681 btrfs_tree_unlock(eb);
4682
4683 free_extent_buffer_stale(eb);
4684 }
4685 }
4686}
4687
4688static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4689 struct extent_io_tree *unpin)
4690{
4691 u64 start;
4692 u64 end;
4693
4694 while (1) {
4695 struct extent_state *cached_state = NULL;
4696
		/*
		 * btrfs_finish_extent_commit() may get the same range as
		 * ours between find_first_extent_bit() and
		 * clear_extent_dirty(). Hence, hold the unused_bg_unpin_mutex
		 * to avoid double unpinning the same extent range.
		 */
4703 mutex_lock(&fs_info->unused_bg_unpin_mutex);
4704 if (!find_first_extent_bit(unpin, 0, &start, &end,
4705 EXTENT_DIRTY, &cached_state)) {
4706 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4707 break;
4708 }
4709
4710 clear_extent_dirty(unpin, start, end, &cached_state);
4711 free_extent_state(cached_state);
4712 btrfs_error_unpin_extent_range(fs_info, start, end);
4713 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4714 cond_resched();
4715 }
4716}
4717
4718static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4719{
4720 struct inode *inode;
4721
4722 inode = cache->io_ctl.inode;
4723 if (inode) {
4724 unsigned int nofs_flag;
4725
4726 nofs_flag = memalloc_nofs_save();
4727 invalidate_inode_pages2(inode->i_mapping);
4728 memalloc_nofs_restore(nofs_flag);
4729
4730 BTRFS_I(inode)->generation = 0;
4731 cache->io_ctl.inode = NULL;
4732 iput(inode);
4733 }
4734 ASSERT(cache->io_ctl.pages == NULL);
4735 btrfs_put_block_group(cache);
4736}
4737
4738void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4739 struct btrfs_fs_info *fs_info)
4740{
4741 struct btrfs_block_group *cache;
4742
4743 spin_lock(&cur_trans->dirty_bgs_lock);
4744 while (!list_empty(&cur_trans->dirty_bgs)) {
4745 cache = list_first_entry(&cur_trans->dirty_bgs,
4746 struct btrfs_block_group,
4747 dirty_list);
4748
4749 if (!list_empty(&cache->io_list)) {
4750 spin_unlock(&cur_trans->dirty_bgs_lock);
4751 list_del_init(&cache->io_list);
4752 btrfs_cleanup_bg_io(cache);
4753 spin_lock(&cur_trans->dirty_bgs_lock);
4754 }
4755
4756 list_del_init(&cache->dirty_list);
4757 spin_lock(&cache->lock);
4758 cache->disk_cache_state = BTRFS_DC_ERROR;
4759 spin_unlock(&cache->lock);
4760
4761 spin_unlock(&cur_trans->dirty_bgs_lock);
4762 btrfs_put_block_group(cache);
4763 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
4764 spin_lock(&cur_trans->dirty_bgs_lock);
4765 }
4766 spin_unlock(&cur_trans->dirty_bgs_lock);
4767
	/*
	 * Refer to the definition of the io_bgs member for details on why
	 * it's safe to use it without any locking.
	 */
4772 while (!list_empty(&cur_trans->io_bgs)) {
4773 cache = list_first_entry(&cur_trans->io_bgs,
4774 struct btrfs_block_group,
4775 io_list);
4776
4777 list_del_init(&cache->io_list);
4778 spin_lock(&cache->lock);
4779 cache->disk_cache_state = BTRFS_DC_ERROR;
4780 spin_unlock(&cache->lock);
4781 btrfs_cleanup_bg_io(cache);
4782 }
4783}
4784
4785static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
4786{
4787 struct btrfs_root *gang[8];
4788 int i;
4789 int ret;
4790
4791 spin_lock(&fs_info->fs_roots_radix_lock);
4792 while (1) {
4793 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
4794 (void **)gang, 0,
4795 ARRAY_SIZE(gang),
4796 BTRFS_ROOT_TRANS_TAG);
4797 if (ret == 0)
4798 break;
4799 for (i = 0; i < ret; i++) {
4800 struct btrfs_root *root = gang[i];
4801
4802 btrfs_qgroup_free_meta_all_pertrans(root);
4803 radix_tree_tag_clear(&fs_info->fs_roots_radix,
4804 (unsigned long)btrfs_root_id(root),
4805 BTRFS_ROOT_TRANS_TAG);
4806 }
4807 }
4808 spin_unlock(&fs_info->fs_roots_radix_lock);
4809}
4810
4811void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4812 struct btrfs_fs_info *fs_info)
4813{
4814 struct btrfs_device *dev, *tmp;
4815
4816 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4817 ASSERT(list_empty(&cur_trans->dirty_bgs));
4818 ASSERT(list_empty(&cur_trans->io_bgs));
4819
4820 list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4821 post_commit_list) {
4822 list_del_init(&dev->post_commit_list);
4823 }
4824
4825 btrfs_destroy_delayed_refs(cur_trans, fs_info);
4826
4827 cur_trans->state = TRANS_STATE_COMMIT_START;
4828 wake_up(&fs_info->transaction_blocked_wait);
4829
4830 cur_trans->state = TRANS_STATE_UNBLOCKED;
4831 wake_up(&fs_info->transaction_wait);
4832
4833 btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
4834 EXTENT_DIRTY);
4835 btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
4836
	cur_trans->state = TRANS_STATE_COMPLETED;
4838 wake_up(&cur_trans->commit_wait);
4839}
4840
4841static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4842{
4843 struct btrfs_transaction *t;
4844
4845 mutex_lock(&fs_info->transaction_kthread_mutex);
4846
4847 spin_lock(&fs_info->trans_lock);
4848 while (!list_empty(&fs_info->trans_list)) {
4849 t = list_first_entry(&fs_info->trans_list,
4850 struct btrfs_transaction, list);
4851 if (t->state >= TRANS_STATE_COMMIT_PREP) {
4852 refcount_inc(&t->use_count);
4853 spin_unlock(&fs_info->trans_lock);
4854 btrfs_wait_for_commit(fs_info, t->transid);
4855 btrfs_put_transaction(t);
4856 spin_lock(&fs_info->trans_lock);
4857 continue;
4858 }
4859 if (t == fs_info->running_transaction) {
4860 t->state = TRANS_STATE_COMMIT_DOING;
4861 spin_unlock(&fs_info->trans_lock);
4862 /*
4863 * We wait for 0 num_writers since we don't hold a trans
4864 * handle open currently for this transaction.
4865 */
4866 wait_event(t->writer_wait,
4867 atomic_read(&t->num_writers) == 0);
4868 } else {
4869 spin_unlock(&fs_info->trans_lock);
4870 }
4871 btrfs_cleanup_one_transaction(t, fs_info);
4872
4873 spin_lock(&fs_info->trans_lock);
4874 if (t == fs_info->running_transaction)
4875 fs_info->running_transaction = NULL;
4876 list_del_init(&t->list);
4877 spin_unlock(&fs_info->trans_lock);
4878
4879 btrfs_put_transaction(t);
4880 trace_btrfs_transaction_commit(fs_info);
4881 spin_lock(&fs_info->trans_lock);
4882 }
4883 spin_unlock(&fs_info->trans_lock);
4884 btrfs_destroy_all_ordered_extents(fs_info);
4885 btrfs_destroy_delayed_inodes(fs_info);
4886 btrfs_assert_delayed_root_empty(fs_info);
4887 btrfs_destroy_all_delalloc_inodes(fs_info);
4888 btrfs_drop_all_logs(fs_info);
4889 btrfs_free_all_qgroup_pertrans(fs_info);
4890 mutex_unlock(&fs_info->transaction_kthread_mutex);
4891
4892 return 0;
4893}
4894
4895int btrfs_init_root_free_objectid(struct btrfs_root *root)
4896{
4897 struct btrfs_path *path;
4898 int ret;
4899 struct extent_buffer *l;
4900 struct btrfs_key search_key;
4901 struct btrfs_key found_key;
4902 int slot;
4903
4904 path = btrfs_alloc_path();
4905 if (!path)
4906 return -ENOMEM;
4907
4908 search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
4909 search_key.type = -1;
4910 search_key.offset = (u64)-1;
4911 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
4912 if (ret < 0)
4913 goto error;
4914 if (ret == 0) {
4915 /*
4916 * Key with offset -1 found, there would have to exist a root
4917 * with such id, but this is out of valid range.
4918 */
4919 ret = -EUCLEAN;
4920 goto error;
4921 }
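	/*
	 * ret > 0 here, so the search landed just past the largest key in
	 * the tree: slot - 1, if it exists, holds the highest objectid in
	 * use, e.g. a highest key of (260, ...) yields free_objectid 261.
	 */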
4922 if (path->slots[0] > 0) {
4923 slot = path->slots[0] - 1;
4924 l = path->nodes[0];
4925 btrfs_item_key_to_cpu(l, &found_key, slot);
4926 root->free_objectid = max_t(u64, found_key.objectid + 1,
4927 BTRFS_FIRST_FREE_OBJECTID);
4928 } else {
4929 root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
4930 }
4931 ret = 0;
4932error:
4933 btrfs_free_path(path);
4934 return ret;
4935}
4936
4937int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
4938{
	int ret;

	mutex_lock(&root->objectid_mutex);
4941
4942 if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
4943 btrfs_warn(root->fs_info,
4944 "the objectid of root %llu reaches its highest value",
4945 btrfs_root_id(root));
4946 ret = -ENOSPC;
4947 goto out;
4948 }
4949
4950 *objectid = root->free_objectid++;
4951 ret = 0;
4952out:
4953 mutex_unlock(&root->objectid_mutex);
4954 return ret;
4955}
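/*
 * Typical caller pattern (a sketch, not tied to any one call site):
 *
 *	u64 objectid;
 *	int ret = btrfs_get_free_objectid(root, &objectid);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, @objectid is reserved, e.g. for a new inode's key.
 */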