// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)
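/*
 * Illustrative userspace sketch (not part of the kernel build): how a
 * supported-flags mask like BTRFS_SUPER_FLAG_SUPP is typically consumed.
 * Any on-disk bit outside the mask indicates a newer or incompatible
 * writer. The demo_* names below are made up for the demonstration.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int demo_super_flags_supported(uint64_t disk_flags, uint64_t supp_mask)
{
	/* reject when any bit outside the supported mask is set */
	return (disk_flags & ~supp_mask) == 0;
}

int main(void)
{
	printf("%d\n", demo_super_flags_supported(0x3, 0x7));	/* 1: all known */
	printf("%d\n", demo_super_flags_supported(0x9, 0x7));	/* 0: unknown bit */
	return 0;
}
#endif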
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	blk_status_t status;
	enum btrfs_wq_endio_type metadata;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;
int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_end_io_wq_exit(void)
{
	kmem_cache_destroy(btrfs_end_io_wq_cache);
}
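/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * btrfs_end_io_wq pattern stashes a bio's original completion callback and
 * private data, points the bio at a trampoline, and restores both before the
 * deferred completion runs in task context. All names below (fake_bio,
 * demo_*) are invented for the demonstration.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct fake_bio {
	void (*end_io)(struct fake_bio *bio);
	void *private;
};

struct demo_end_io_ctx {
	void (*orig_end_io)(struct fake_bio *bio);
	void *orig_private;
};

static void demo_user_end_io(struct fake_bio *bio)
{
	printf("user end_io sees private=%s\n", (char *)bio->private);
}

/* trampoline: restore the stashed callback, then invoke it */
static void demo_trampoline(struct fake_bio *bio)
{
	struct demo_end_io_ctx *ctx = bio->private;

	bio->end_io = ctx->orig_end_io;
	bio->private = ctx->orig_private;
	free(ctx);
	bio->end_io(bio);
}

int main(void)
{
	struct fake_bio bio = { demo_user_end_io, "hello" };
	struct demo_end_io_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return 1;
	ctx->orig_end_io = bio.end_io;	/* stash, as btrfs_bio_wq_end_io does */
	ctx->orig_private = bio.private;
	bio.end_io = demo_trampoline;
	bio.private = ctx;

	bio.end_io(&bio);		/* completion fires the trampoline */
	return 0;
}
#endif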
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads. They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	void *private_data;
	struct bio *bio;
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	blk_status_t status;
};
/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	.name_stem = "free-space" },
	{ .id = 0,				.name_stem = "tree"	},
};
void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
struct extent_map *btree_get_extent(struct btrfs_inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev = fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}
u32 btrfs_csum_data(const char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}
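/*
 * Illustrative userspace sketch (not part of the kernel build): what the two
 * helpers above compute. btrfs_csum_data() is plain crc32c() with a running
 * seed, and btrfs_csum_final() stores the bitwise complement little-endian,
 * the usual reflected CRC-32C convention. demo_crc32c() is a bit-at-a-time
 * reference implementation of the Castagnoli polynomial.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* reflected CRC-32C (Castagnoli), polynomial 0x82F63B78 */
static uint32_t demo_crc32c(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;
	size_t i;
	int k;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char msg[] = "123456789";
	uint32_t crc = ~(uint32_t)0;	/* the seed btrfs starts from */
	uint8_t result[4];

	crc = demo_crc32c(crc, msg, strlen(msg));
	crc = ~crc;				/* btrfs_csum_final(): invert */
	memcpy(result, &crc, sizeof(result));	/* LE host assumed for brevity */

	/* the well-known CRC-32C check value: 0xe3069283 */
	printf("crc32c(\"123456789\") = 0x%08x\n", crc);
	return 0;
}
#endif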
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char result[BTRFS_CSUM_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		/*
		 * Note: we don't need to check for the err == 1 case here, as
		 * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
		 * and 'min_len = 32' and the currently implemented mapping
		 * algorithm we cannot cross a page boundary.
		 */
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return err;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	memset(result, 0, BTRFS_CSUM_SIZE);

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			return -EUCLEAN;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}

	return 0;
}
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_read(eb);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been freed and re-allocated.  So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}
/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
				  char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		char result[sizeof(crc)];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, sizeof(result)))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		btrfs_err(fs_info, "unsupported checksum algorithm %u",
				csum_type);
		ret = 1;
	}

	return ret;
}
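/*
 * Illustrative userspace sketch (not part of the kernel build): the on-disk
 * layout the check above relies on. The first BTRFS_CSUM_SIZE (32) bytes of
 * the 4096-byte superblock hold the checksum; the CRC covers everything
 * after it, zero padding included. demo_* names are invented here.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_CSUM_SIZE 32
#define DEMO_SUPER_INFO_SIZE 4096

static uint32_t demo_crc32c(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;
	size_t i;
	int k;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

static void demo_super_csum(const uint8_t *raw, uint8_t result[4])
{
	uint32_t crc = ~(uint32_t)0;

	/* checksum everything after the csum field, padding included */
	crc = ~demo_crc32c(crc, raw + DEMO_CSUM_SIZE,
			   DEMO_SUPER_INFO_SIZE - DEMO_CSUM_SIZE);
	memcpy(result, &crc, sizeof(crc));	/* little-endian host assumed */
}

int main(void)
{
	static uint8_t raw[DEMO_SUPER_INFO_SIZE];
	uint8_t again[4];

	memcpy(raw + 64, "_BHRfS_M", 8);	/* magic lives past the csum */
	demo_super_csum(raw, raw);		/* store csum in bytes 0..3 */
	demo_super_csum(raw, again);		/* recompute and compare */
	printf("csum %s\n", memcmp(raw, again, 4) ? "mismatch" : "ok");
	return 0;
}
#endif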
static int verify_level_key(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *eb, int level,
			    struct btrfs_key *first_key, u64 parent_transid)
{
	int found_level;
	struct btrfs_key found_key;
	int ret;

	found_level = btrfs_header_level(eb);
	if (found_level != level) {
#ifdef CONFIG_BTRFS_DEBUG
		WARN_ON(1);
		btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
			  eb->start, level, found_level);
#endif
		return -EIO;
	}

	if (!first_key)
		return 0;

	/*
	 * For live tree blocks (new tree blocks in the current transaction),
	 * we need proper lock context to avoid races, which is impossible
	 * here. So we only check tree blocks which are read from disk, whose
	 * generation <= fs_info->last_trans_committed.
	 */
	if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
		return 0;
	if (found_level)
		btrfs_node_key_to_cpu(eb, &found_key, 0);
	else
		btrfs_item_key_to_cpu(eb, &found_key, 0);
	ret = btrfs_comp_cpu_keys(first_key, &found_key);

	if (ret) {
#ifdef CONFIG_BTRFS_DEBUG
		WARN_ON(1);
		btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
			  eb->start, parent_transid, first_key->objectid,
			  first_key->type, first_key->offset,
			  found_key.objectid, found_key.type,
			  found_key.offset);
#endif
	}
	return ret;
}
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @parent_transid:	expected transid, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key of first slot, skip check if NULL
 */
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
					  struct extent_buffer *eb,
					  u64 parent_transid, int level,
					  struct btrfs_key *first_key)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
					       mirror_num);
		if (!ret) {
			if (verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				ret = -EIO;
			else if (verify_level_key(fs_info, eb, level,
						  first_key, parent_transid))
				ret = -EUCLEAN;
			else
				break;
		}

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(fs_info, eb, failed_mirror);

	return ret;
}
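/*
 * Illustrative userspace sketch (not part of the kernel build): the mirror
 * retry order used above. Mirror 0 means "let the lower layer pick"; after
 * the first failure we remember which mirror failed and skip it while
 * walking the remaining copies.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int num_copies = 3;	/* e.g. three-copy metadata */
	int failed_mirror = 2;	/* pretend mirror 2 returned bad csums */
	int mirror_num = 0;

	while (1) {
		/* a read with mirror_num would happen here */
		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;
		if (mirror_num > num_copies)
			break;
		printf("retrying with mirror %d\n", mirror_num);	/* 1, 3 */
	}
	return 0;
}
#endif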
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;

	found_start = btrfs_header_bytenr(eb);
	/*
	 * Please do not consolidate these warnings into a single if.
	 * It is useful to know what went wrong.
	 */
	if (WARN_ON(found_start != start))
		return -EUCLEAN;
	if (WARN_ON(!PageUptodate(page)))
		return -EUCLEAN;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);

	return csum_tree_block(fs_info, eb, 0);
}
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_FSID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		u8 *metadata_uuid;

		/*
		 * Checking the incompat flag is only valid for the current
		 * fs. For seed devices it's forbidden to have their uuid
		 * changed so reading ->fsid in this case is fine
		 */
		if (fs_devices == fs_info->fs_devices &&
		    btrfs_fs_incompat(fs_info, METADATA_UUID))
			metadata_uuid = fs_devices->metadata_uuid;
		else
			metadata_uuid = fs_devices->fsid;

		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all this other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info, "bad tree block start, want %llu have %llu",
			     eb->start, found_start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(fs_info, eb)) {
		btrfs_err_rl(fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info, "bad tree block level %d on %llu",
			  (int)btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(fs_info, eb, 1);
	if (ret)
		goto err;

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf_full(fs_info, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(fs_info, eb))
		ret = -EIO;

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(eb, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}
static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->status = bio->bi_status;

	if (bio_op(bio) == REQ_OP_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}
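/*
 * Illustrative userspace sketch (not part of the kernel build): the routing
 * above is a two-level dispatch, first on read vs write, then on the
 * metadata tag. A table-style equivalent for the write side makes the
 * mapping easier to see; the demo_* names are invented here.
 */
#if 0
#include <stdio.h>

enum demo_endio_type { DEMO_DATA, DEMO_METADATA, DEMO_FREE_SPACE, DEMO_RAID56 };

static const char *demo_pick_write_wq(enum demo_endio_type type)
{
	switch (type) {
	case DEMO_METADATA:	return "endio-meta-write";
	case DEMO_FREE_SPACE:	return "freespace-write";
	case DEMO_RAID56:	return "endio-raid56";
	default:		return "endio-write";
	}
}

int main(void)
{
	printf("metadata write -> %s\n", demo_pick_write_wq(DEMO_METADATA));
	printf("data write     -> %s\n", demo_pick_write_wq(DEMO_DATA));
	return 0;
}
#endif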
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
		enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return BLK_STS_RESOURCE;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->status = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->private_data, async->bio,
				      async->bio_offset);
	if (ret)
		async->status = ret;
}

/*
 * In order to insert checksums into the metadata in large chunks, we wait
 * until bio submission time.   All the pages in the bio are checksummed and
 * sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record are
 * inserted into the tree.
 */
static void run_one_async_done(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	struct inode *inode;
	blk_status_t ret;

	async = container_of(work, struct async_submit_bio, work);
	inode = async->private_data;

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->status) {
		async->bio->bi_status = async->status;
		bio_endio(async->bio);
		return;
	}

	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
			async->mirror_num, 1);
	if (ret) {
		async->bio->bi_status = ret;
		bio_endio(async->bio);
	}
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset, void *private_data,
				 extent_submit_bio_start_t *submit_bio_start)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return BLK_STS_RESOURCE;

	async->private_data = private_data;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_offset = bio_offset;

	async->status = 0;

	if (op_is_sync(bio->bi_opf))
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);
	return 0;
}
static blk_status_t btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));
	bio_for_each_segment_all(bvec, bio, i, iter_all) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return errno_to_blk_status(ret);
}
static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
					   u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}
static int check_async_write(struct btrfs_inode *bi)
{
	if (atomic_read(&bi->sync_writers))
		return 0;
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_XMM4_2))
		return 0;
#endif
	return 1;
}
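/*
 * Illustrative userspace sketch (not part of the kernel build): the decision
 * check_async_write() encodes. Checksumming stays inline when a writer is
 * waiting synchronously, or when the CPU has hardware CRC32C (SSE4.2 on
 * x86), in which case offloading costs more than the checksum itself.
 */
#if 0
#include <stdio.h>

static int demo_should_offload(int sync_writers, int have_hw_crc32c)
{
	if (sync_writers)
		return 0;	/* someone is waiting: checksum inline */
	if (have_hw_crc32c)
		return 0;	/* csum is cheap: offload not worth it */
	return 1;		/* slow csum, nobody waiting: use the workers */
}

int main(void)
{
	printf("sync writer, hw crc: %d\n", demo_should_offload(1, 1));	/* 0 */
	printf("async, soft crc:     %d\n", demo_should_offload(0, 0));	/* 1 */
	return 0;
}
#endif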
static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
					  int mirror_num, unsigned long bio_flags,
					  u64 bio_offset)
{
	struct inode *inode = private_data;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int async = check_async_write(BTRFS_I(inode));
	blk_status_t ret;

	if (bio_op(bio) != REQ_OP_WRITE) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(fs_info, bio,
					  BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
					  bio_offset, private_data,
					  btree_submit_bio_start);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_status = ret;
	bio_endio(bio);
	return ret;
}
#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH,
					     fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}
static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}
static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}
static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, WAIT_NONE, 0);
	free_extent_buffer(buf);
}
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
				       mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}
struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr);
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}
/*
 * Read tree block at logical address @bytenr and do basic but critical
 * verification.
 *
 * @parent_transid:	expected transid of this tree block, skip check if 0
 * @level:		expected level, mandatory check
 * @first_key:		expected key in slot 0, skip check if NULL
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      u64 parent_transid, int level,
				      struct btrfs_key *first_key)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
					     level, first_key);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;
}
void clean_tree_block(struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
						 -buf->len,
						 fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking_write(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}
static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshotted, 0);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (!dummy)
		extent_io_tree_init(&root->dirty_log_pages, NULL);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (!dummy)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
}
static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
		gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		root->fs_info = fs_info;
	return root;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
	root->alloc_bytenr = 0;

	return root;
}
#endif
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;
	uuid_le uuid = NULL_UUID_LE;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(root->commit_root);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	/*
	 * DON'T set REF_COWS for log trees
	 *
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	root->node = leaf;

	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	return 0;
}
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	int ret;
	int level;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info, GFP_NOFS);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	root->node = read_tree_block(fs_info,
				     btrfs_root_bytenr(&root->root_item),
				     generation, level, NULL);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		free_extent_buffer(root->node);
		goto find_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}
int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;
	struct btrfs_subvolume_writers *writers;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	writers = btrfs_alloc_subvolume_writers();
	if (IS_ERR(writers)) {
		ret = PTR_ERR(writers);
		goto fail;
	}
	root->subv_writers = writers;

	btrfs_init_free_ino_ctl(root);
	spin_lock_init(&root->ino_cache_lock);
	init_waitqueue_head(&root->ino_cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_find_highest_objectid(root,
					&root->highest_objectid);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible to call btrfs_free_fs_root */
	return ret;
}
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	btrfs_free_fs_root(root);
	return ERR_PTR(ret);
}
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = device->bdev->bd_bdi;
		if (bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
	bio = end_io_wq->bio;

	bio->bi_status = end_io_wq->status;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
	bio_endio(bio);
}
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->delete_unused_bgs_mutex. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);
sleep:
		clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * fs_info->commit_interval;
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		now = ktime_get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
		    (now < cur->start_time ||
		     now - cur->start_time < fs_info->commit_interval)) {
			spin_unlock(&fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
				      &fs_info->fs_state)))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
				(!btrfs_transaction_blocked(fs_info) ||
				 cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}
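/*
 * Illustrative userspace sketch (not part of the kernel build): the timing
 * test above. A periodic commit is only forced once the running transaction
 * is at least commit_interval seconds old; if the clock jumped backwards
 * (now < start_time) the kthread just sleeps and retries.
 */
#if 0
#include <stdio.h>

static int demo_commit_due(long long now, long long start_time,
			   unsigned int commit_interval)
{
	if (now < start_time)	/* clock went backwards: just wait */
		return 0;
	return now - start_time >= commit_interval;
}

int main(void)
{
	printf("%d\n", demo_commit_due(130, 100, 30));	/* 1: 30s elapsed */
	printf("%d\n", demo_commit_due(110, 100, 30));	/* 0: too young */
	return 0;
}
#endif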
/*
 * this will find the highest generation in the array of root backups.
 * The index of the newest slot is returned, or -1 if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}
/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}
/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			       btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			       btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}
/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * fixme: the total bytes and num_devices need to match or we should
	 * need a fsck
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
	return 0;
}
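/*
 * Illustrative userspace sketch (not part of the kernel build): the ring
 * arithmetic used by the backup root helpers above. New backups advance the
 * write index forward mod BTRFS_NUM_BACKUP_ROOTS (4); recovery walks
 * backwards from the newest slot, one generation at a time.
 */
#if 0
#include <stdio.h>

#define DEMO_NUM_BACKUP_ROOTS 4

int main(void)
{
	int newest = 2;		/* slot holding the highest generation */
	int idx = newest;
	int tries;

	for (tries = 0; tries < DEMO_NUM_BACKUP_ROOTS; tries++) {
		printf("try backup slot %d\n", idx);	/* 2, 1, 0, 3 */
		/* step to the next older slot, wrapping around */
		idx = (idx + DEMO_NUM_BACKUP_ROOTS - 1) % DEMO_NUM_BACKUP_ROOTS;
	}
	return 0;
}
#endif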
/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_destroy_workqueue(fs_info->fixup_workers);
	btrfs_destroy_workqueue(fs_info->delalloc_workers);
	btrfs_destroy_workqueue(fs_info->workers);
	btrfs_destroy_workqueue(fs_info->endio_workers);
	btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
	btrfs_destroy_workqueue(fs_info->endio_repair_workers);
	btrfs_destroy_workqueue(fs_info->rmw_workers);
	btrfs_destroy_workqueue(fs_info->endio_write_workers);
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->submit_workers);
	btrfs_destroy_workqueue(fs_info->delayed_workers);
	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_destroy_workqueue(fs_info->readahead_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
	btrfs_destroy_workqueue(fs_info->extent_workers);
	/*
	 * Now that all other work queues are destroyed, we can safely destroy
	 * the queues used for metadata I/O, since tasks from those other work
	 * queues can do metadata I/O operations.
	 */
	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
}
static void free_root_extent_buffers(struct btrfs_root *root)
{
	if (root) {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		root->node = NULL;
		root->commit_root = NULL;
	}
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_root_extent_buffers(info->tree_root);

	free_root_extent_buffers(info->dev_root);
	free_root_extent_buffers(info->extent_root);
	free_root_extent_buffers(info->csum_root);
	free_root_extent_buffers(info->quota_root);
	free_root_extent_buffers(info->uuid_root);
	if (chunk_root)
		free_root_extent_buffers(info->chunk_root);
	free_root_extent_buffers(info->free_space_root);
}
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			btrfs_put_fs_root(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log_root_tree(NULL, fs_info);
		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	}
}
static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	refcount_set(&fs_info->scrub_workers_refcnt, 0);
}

static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);
}

static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
	struct inode *inode = fs_info->btree_inode;

	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(inode, 1);
	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	inode->i_size = OFFSET_MAX;
	inode->i_mapping->a_ops = &btree_aops;

	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode);
	BTRFS_I(inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);

	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(inode)->root = fs_info->tree_root;
	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
	btrfs_insert_inode_hash(inode);
}

static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	init_rwsem(&fs_info->dev_replace.rwsem);
	init_waitqueue_head(&fs_info->dev_replace.replace_wait);
}

static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
{
	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	fs_info->qgroup_op_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->qgroup_ulist = NULL;
	fs_info->qgroup_rescan_running = false;
	mutex_init(&fs_info->qgroup_rescan_lock);
}
2173 static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
2174 struct btrfs_fs_devices *fs_devices)
2176 u32 max_active = fs_info->thread_pool_size;
2177 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2180 btrfs_alloc_workqueue(fs_info, "worker",
2181 flags | WQ_HIGHPRI, max_active, 16);
2183 fs_info->delalloc_workers =
2184 btrfs_alloc_workqueue(fs_info, "delalloc",
2185 flags, max_active, 2);
2187 fs_info->flush_workers =
2188 btrfs_alloc_workqueue(fs_info, "flush_delalloc",
2189 flags, max_active, 0);
2191 fs_info->caching_workers =
2192 btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
2195 * a higher idle thresh on the submit workers makes it much more
2196 * likely that bios will be send down in a sane order to the
2199 fs_info->submit_workers =
2200 btrfs_alloc_workqueue(fs_info, "submit", flags,
2201 min_t(u64, fs_devices->num_devices,
2204 fs_info->fixup_workers =
2205 btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
	/*
	 * endios are largely parallel and should have a very
	 * high idle thresh.
	 */
	fs_info->endio_workers =
		btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
	fs_info->endio_meta_workers =
		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
				      max_active, 4);
	fs_info->endio_meta_write_workers =
		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
				      max_active, 2);
	fs_info->endio_raid56_workers =
		btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
				      max_active, 4);
	fs_info->endio_repair_workers =
		btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
	fs_info->rmw_workers =
		btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
	fs_info->endio_write_workers =
		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
				      max_active, 2);
	fs_info->endio_freespace_worker =
		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
				      max_active, 0);
	fs_info->delayed_workers =
		btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
				      max_active, 0);
	fs_info->readahead_workers =
		btrfs_alloc_workqueue(fs_info, "readahead", flags,
				      max_active, 2);
	fs_info->qgroup_rescan_workers =
		btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
	fs_info->extent_workers =
		btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
				      min_t(u64, fs_devices->num_devices,
					    max_active), 8);
2245 if (!(fs_info->workers && fs_info->delalloc_workers &&
2246 fs_info->submit_workers && fs_info->flush_workers &&
2247 fs_info->endio_workers && fs_info->endio_meta_workers &&
2248 fs_info->endio_meta_write_workers &&
2249 fs_info->endio_repair_workers &&
2250 fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2251 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2252 fs_info->caching_workers && fs_info->readahead_workers &&
2253 fs_info->fixup_workers && fs_info->delayed_workers &&
2254 fs_info->extent_workers &&
	      fs_info->qgroup_rescan_workers)) {
		return -ENOMEM;
	}

	return 0;
}
2262 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
			    struct btrfs_fs_devices *fs_devices)
{
	int ret;
2266 struct btrfs_root *log_tree_root;
2267 struct btrfs_super_block *disk_super = fs_info->super_copy;
2268 u64 bytenr = btrfs_super_log_root(disk_super);
2269 int level = btrfs_super_log_root_level(disk_super);
	if (fs_devices->rw_devices == 0) {
		btrfs_warn(fs_info, "log replay required on RO media");
		return -EIO;
	}
	log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!log_tree_root)
		return -ENOMEM;
2280 __setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
	log_tree_root->node = read_tree_block(fs_info, bytenr,
					      fs_info->generation + 1,
					      level, NULL);
	if (IS_ERR(log_tree_root->node)) {
		btrfs_warn(fs_info, "failed to read log tree");
		ret = PTR_ERR(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	} else if (!extent_buffer_uptodate(log_tree_root->node)) {
		btrfs_err(fs_info, "failed to read log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return -EIO;
	}
2296 /* returns with log_tree_root freed on success */
	ret = btrfs_recover_log_trees(log_tree_root);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to recover log tree");
		free_extent_buffer(log_tree_root->node);
		kfree(log_tree_root);
		return ret;
	}
	if (sb_rdonly(fs_info->sb)) {
		ret = btrfs_commit_super(fs_info);
		if (ret)
			return ret;
	}

	return 0;
}
2315 static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2317 struct btrfs_root *tree_root = fs_info->tree_root;
2318 struct btrfs_root *root;
2319 struct btrfs_key location;
2322 BUG_ON(!fs_info->tree_root);
2324 location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2325 location.type = BTRFS_ROOT_ITEM_KEY;
2326 location.offset = 0;
2328 root = btrfs_read_tree_root(tree_root, &location);
2330 ret = PTR_ERR(root);
2333 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2334 fs_info->extent_root = root;
2336 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2337 root = btrfs_read_tree_root(tree_root, &location);
2339 ret = PTR_ERR(root);
2342 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2343 fs_info->dev_root = root;
2344 btrfs_init_devices_late(fs_info);
2346 location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2347 root = btrfs_read_tree_root(tree_root, &location);
2349 ret = PTR_ERR(root);
2352 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2353 fs_info->csum_root = root;
2355 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2356 root = btrfs_read_tree_root(tree_root, &location);
2357 if (!IS_ERR(root)) {
2358 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2359 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2360 fs_info->quota_root = root;
2363 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2364 root = btrfs_read_tree_root(tree_root, &location);
2366 ret = PTR_ERR(root);
2370 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2371 fs_info->uuid_root = root;
2374 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2375 location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
2376 root = btrfs_read_tree_root(tree_root, &location);
2378 ret = PTR_ERR(root);
2381 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
2382 fs_info->free_space_root = root;
2387 btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2388 location.objectid, ret);
2393 * Real super block validation
2394 * NOTE: super csum type and incompat features will not be checked here.
2396 * @sb: super block to check
2397 * @mirror_num: the super block number to check its bytenr:
2398 * 0 the primary (1st) sb
 *		1, 2	the 2nd and 3rd backup copies
2400 * -1 skip bytenr check
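 *
 * Note: the standard super block copies live at fixed offsets (64KiB,
 * 64MiB and 256GiB); btrfs_sb_offset() maps @mirror_num to one of those
 * bytenrs for the check below.
 */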
2402 static int validate_super(struct btrfs_fs_info *fs_info,
2403 struct btrfs_super_block *sb, int mirror_num)
2405 u64 nodesize = btrfs_super_nodesize(sb);
	u64 sectorsize = btrfs_super_sectorsize(sb);
	int ret = 0;
2409 if (btrfs_super_magic(sb) != BTRFS_MAGIC) {
2410 btrfs_err(fs_info, "no valid FS found");
2413 if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2414 btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2415 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2418 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
2419 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2420 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2423 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
2424 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2425 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2428 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
2429 btrfs_err(fs_info, "log_root level too big: %d >= %d",
2430 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
	/*
	 * Check sectorsize and nodesize first, other checks will need them.
	 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
	 */
2438 if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
2439 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2440 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
	/* Only PAGE_SIZE is supported for now */
	if (sectorsize != PAGE_SIZE) {
		btrfs_err(fs_info,
			"sectorsize %llu not supported yet, only support %lu",
			sectorsize, PAGE_SIZE);
2450 if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
2451 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2452 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2455 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2456 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2457 le32_to_cpu(sb->__unused_leafsize), nodesize);
2461 /* Root alignment check */
2462 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2463 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2464 btrfs_super_root(sb));
2467 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2468 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2469 btrfs_super_chunk_root(sb));
2472 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2473 btrfs_warn(fs_info, "log_root block unaligned: %llu",
2474 btrfs_super_log_root(sb));
	if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
		   BTRFS_FSID_SIZE) != 0) {
		btrfs_err(fs_info,
			"dev_item UUID does not match metadata fsid: %pU != %pU",
			fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
	/*
	 * Hint to catch really bogus numbers, bitflips or so, more exact
	 * checks are done later.
	 */
2490 if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
2491 btrfs_err(fs_info, "bytes_used is too small %llu",
2492 btrfs_super_bytes_used(sb));
2495 if (!is_power_of_2(btrfs_super_stripesize(sb))) {
2496 btrfs_err(fs_info, "invalid stripesize %u",
2497 btrfs_super_stripesize(sb));
2500 if (btrfs_super_num_devices(sb) > (1UL << 31))
2501 btrfs_warn(fs_info, "suspicious number of devices: %llu",
2502 btrfs_super_num_devices(sb));
2503 if (btrfs_super_num_devices(sb) == 0) {
2504 btrfs_err(fs_info, "number of devices is 0");
2508 if (mirror_num >= 0 &&
2509 btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) {
2510 btrfs_err(fs_info, "super offset mismatch %llu != %u",
2511 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
	/*
	 * Obvious sys_chunk_array corruptions, it must hold at least one key
	 * and one chunk.
	 */
2519 if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2520 btrfs_err(fs_info, "system chunk array too big %u > %u",
2521 btrfs_super_sys_array_size(sb),
2522 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2525 if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
2526 + sizeof(struct btrfs_chunk)) {
2527 btrfs_err(fs_info, "system chunk array too small %u < %zu",
2528 btrfs_super_sys_array_size(sb),
2529 sizeof(struct btrfs_disk_key)
2530 + sizeof(struct btrfs_chunk));
	/*
	 * The generation is a global counter; we'll trust it more than the
	 * others, but it's still possible that it's the one that's wrong.
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		btrfs_warn(fs_info,
			"suspicious: generation < chunk_root_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		btrfs_warn(fs_info,
			"suspicious: generation < cache_generation: %llu < %llu",
			btrfs_super_generation(sb),
			btrfs_super_cache_generation(sb));

	return ret;
}

/*
2554 * Validation of super block at mount time.
 * Some checks, like the csum type and incompat flags, were already done
 * early at mount time and will be skipped here.
 */
2558 static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2560 return validate_super(fs_info, fs_info->super_copy, 0);
2564 * Validation of super block at write time.
 * Some checks like bytenr check will be skipped as their values will be
 * overwritten soon.
 * Extra checks like csum type and incompat flags will be done here.
 */
2569 static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2570 struct btrfs_super_block *sb)
2574 ret = validate_super(fs_info, sb, -1);
2577 if (btrfs_super_csum_type(sb) != BTRFS_CSUM_TYPE_CRC32) {
2579 btrfs_err(fs_info, "invalid csum type, has %u want %u",
2580 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2583 if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2586 "invalid incompat flags, has 0x%llx valid mask 0x%llx",
2587 btrfs_super_incompat_flags(sb),
2588 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2594 "super block corruption detected before writing it to disk");
2598 int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 stripesize;
	u64 generation;
	u64 features;
2607 struct btrfs_key location;
2608 struct buffer_head *bh;
2609 struct btrfs_super_block *disk_super;
2610 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2611 struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	int ret;
	int err = -EINVAL;
2615 int num_backups_tried = 0;
2616 int backup_index = 0;
	int clear_free_space_tree = 0;
	int level;
2620 tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
2621 chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	if (!tree_root || !chunk_root) {
		err = -ENOMEM;
		goto fail;
	}
2627 ret = init_srcu_struct(&fs_info->subvol_srcu);
2633 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2638 fs_info->dirty_metadata_batch = PAGE_SIZE *
2639 (1 + ilog2(nr_cpu_ids));
2641 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2644 goto fail_dirty_metadata_bytes;
2647 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2651 goto fail_delalloc_bytes;
2654 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2655 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2656 INIT_LIST_HEAD(&fs_info->trans_list);
2657 INIT_LIST_HEAD(&fs_info->dead_roots);
2658 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2659 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2660 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2661 INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
2662 spin_lock_init(&fs_info->pending_raid_kobjs_lock);
2663 spin_lock_init(&fs_info->delalloc_root_lock);
2664 spin_lock_init(&fs_info->trans_lock);
2665 spin_lock_init(&fs_info->fs_roots_radix_lock);
2666 spin_lock_init(&fs_info->delayed_iput_lock);
2667 spin_lock_init(&fs_info->defrag_inodes_lock);
2668 spin_lock_init(&fs_info->tree_mod_seq_lock);
2669 spin_lock_init(&fs_info->super_lock);
2670 spin_lock_init(&fs_info->qgroup_op_lock);
2671 spin_lock_init(&fs_info->buffer_lock);
2672 spin_lock_init(&fs_info->unused_bgs_lock);
2673 rwlock_init(&fs_info->tree_mod_log_lock);
2674 mutex_init(&fs_info->unused_bg_unpin_mutex);
2675 mutex_init(&fs_info->delete_unused_bgs_mutex);
2676 mutex_init(&fs_info->reloc_mutex);
2677 mutex_init(&fs_info->delalloc_root_mutex);
2678 seqlock_init(&fs_info->profiles_lock);
2680 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2681 INIT_LIST_HEAD(&fs_info->space_info);
2682 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2683 INIT_LIST_HEAD(&fs_info->unused_bgs);
2684 btrfs_mapping_init(&fs_info->mapping_tree);
2685 btrfs_init_block_rsv(&fs_info->global_block_rsv,
2686 BTRFS_BLOCK_RSV_GLOBAL);
2687 btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2688 btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2689 btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2690 btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2691 BTRFS_BLOCK_RSV_DELOPS);
2692 btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
2693 BTRFS_BLOCK_RSV_DELREFS);
2695 atomic_set(&fs_info->async_delalloc_pages, 0);
2696 atomic_set(&fs_info->defrag_running, 0);
2697 atomic_set(&fs_info->qgroup_op_seq, 0);
2698 atomic_set(&fs_info->reada_works_cnt, 0);
2699 atomic_set(&fs_info->nr_delayed_iputs, 0);
2700 atomic64_set(&fs_info->tree_mod_seq, 0);
2702 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2703 fs_info->metadata_ratio = 0;
2704 fs_info->defrag_inodes = RB_ROOT;
2705 atomic64_set(&fs_info->free_chunk_space, 0);
2706 fs_info->tree_mod_log = RB_ROOT;
2707 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2708 fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
2709 /* readahead state */
2710 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
2711 spin_lock_init(&fs_info->reada_lock);
2712 btrfs_init_ref_verify(fs_info);
2714 fs_info->thread_pool_size = min_t(unsigned long,
2715 num_online_cpus() + 2, 8);
2717 INIT_LIST_HEAD(&fs_info->ordered_roots);
2718 spin_lock_init(&fs_info->ordered_root_lock);
2720 fs_info->btree_inode = new_inode(sb);
2721 if (!fs_info->btree_inode) {
2723 goto fail_bio_counter;
2725 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2727 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2729 if (!fs_info->delayed_root) {
2733 btrfs_init_delayed_root(fs_info->delayed_root);
2735 btrfs_init_scrub(fs_info);
2736 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2737 fs_info->check_integrity_print_mask = 0;
2739 btrfs_init_balance(fs_info);
2740 btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
2742 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2743 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2745 btrfs_init_btree_inode(fs_info);
2747 spin_lock_init(&fs_info->block_group_cache_lock);
2748 fs_info->block_group_cache_tree = RB_ROOT;
2749 fs_info->first_logical_byte = (u64)-1;
2751 extent_io_tree_init(&fs_info->freed_extents[0], NULL);
2752 extent_io_tree_init(&fs_info->freed_extents[1], NULL);
2753 fs_info->pinned_extents = &fs_info->freed_extents[0];
2754 set_bit(BTRFS_FS_BARRIER, &fs_info->flags);
2756 mutex_init(&fs_info->ordered_operations_mutex);
2757 mutex_init(&fs_info->tree_log_mutex);
2758 mutex_init(&fs_info->chunk_mutex);
2759 mutex_init(&fs_info->transaction_kthread_mutex);
2760 mutex_init(&fs_info->cleaner_mutex);
2761 mutex_init(&fs_info->ro_block_group_mutex);
2762 init_rwsem(&fs_info->commit_root_sem);
2763 init_rwsem(&fs_info->cleanup_work_sem);
2764 init_rwsem(&fs_info->subvol_sem);
2765 sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2767 btrfs_init_dev_replace_locks(fs_info);
2768 btrfs_init_qgroup(fs_info);
2770 btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2771 btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2773 init_waitqueue_head(&fs_info->transaction_throttle);
2774 init_waitqueue_head(&fs_info->transaction_wait);
2775 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2776 init_waitqueue_head(&fs_info->async_submit_wait);
2777 init_waitqueue_head(&fs_info->delayed_iputs_wait);
2779 INIT_LIST_HEAD(&fs_info->pinned_chunks);
2781 /* Usable values until the real ones are cached from the superblock */
2782 fs_info->nodesize = 4096;
2783 fs_info->sectorsize = 4096;
2784 fs_info->stripesize = 4096;
2786 spin_lock_init(&fs_info->swapfile_pins_lock);
2787 fs_info->swapfile_pins = RB_ROOT;
2789 ret = btrfs_alloc_stripe_hash_table(fs_info);
2795 __setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
2797 invalidate_bdev(fs_devices->latest_bdev);
2800 * Read super block and check the signature bytes only
2802 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2809 * We want to check superblock checksum, the type is stored inside.
2810 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2812 if (btrfs_check_super_csum(fs_info, bh->b_data)) {
2813 btrfs_err(fs_info, "superblock checksum mismatch");
2820 * super_copy is zeroed at allocation time and we never touch the
2821 * following bytes up to INFO_SIZE, the checksum is calculated from
2822 * the whole block of INFO_SIZE
2824 memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2827 disk_super = fs_info->super_copy;
2829 ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
2832 if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
2833 ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
2834 fs_info->super_copy->metadata_uuid,
2838 features = btrfs_super_flags(disk_super);
2839 if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
2840 features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
2841 btrfs_set_super_flags(disk_super, features);
		btrfs_info(fs_info,
			   "found metadata UUID change in progress flag, clearing");
2846 memcpy(fs_info->super_for_commit, fs_info->super_copy,
2847 sizeof(*fs_info->super_for_commit));
2849 ret = btrfs_validate_mount_super(fs_info);
2851 btrfs_err(fs_info, "superblock contains fatal errors");
2856 if (!btrfs_super_root(disk_super))
2859 /* check FS state, whether FS is broken. */
2860 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2861 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2864 * run through our array of backup supers and setup
2865 * our ring pointer to the oldest one
2867 generation = btrfs_super_generation(disk_super);
2868 find_oldest_super_backup(fs_info, generation);
2871 * In the long term, we'll store the compression type in the super
2872 * block, and it'll be used for per file compression control.
2874 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2876 ret = btrfs_parse_options(fs_info, options, sb->s_flags);
2882 features = btrfs_super_incompat_flags(disk_super) &
2883 ~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		btrfs_err(fs_info,
		    "cannot mount because of unsupported optional features (%llx)",
		    features);
		err = -EINVAL;
		goto fail_alloc;
	}
2892 features = btrfs_super_incompat_flags(disk_super);
2893 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2894 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
2895 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2896 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
2897 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
2899 if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2900 btrfs_info(fs_info, "has skinny extents");
2903 * flag our filesystem as having big metadata blocks if
2904 * they are bigger than the page size
2906 if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
2907 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			btrfs_info(fs_info,
				   "flagging fs with big metadata feature");
2910 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2913 nodesize = btrfs_super_nodesize(disk_super);
2914 sectorsize = btrfs_super_sectorsize(disk_super);
2915 stripesize = sectorsize;
2916 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
2917 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2919 /* Cache block sizes */
2920 fs_info->nodesize = nodesize;
2921 fs_info->sectorsize = sectorsize;
2922 fs_info->stripesize = stripesize;
2925 * mixed block groups end up with duplicate but slightly offset
2926 * extent buffers for the same range. It leads to corruptions
2928 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2929 (sectorsize != nodesize)) {
		btrfs_err(fs_info,
"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
2932 nodesize, sectorsize);
		goto fail_alloc;
	}

	/*
	 * Needn't use the lock because there is no other task which will
	 * update the flag.
	 */
2940 btrfs_set_super_incompat_flags(disk_super, features);
2942 features = btrfs_super_compat_ro_flags(disk_super) &
2943 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2944 if (!sb_rdonly(sb) && features) {
		btrfs_err(fs_info,
	"cannot mount read-write because of unsupported optional features (%llx)",
			features);
		err = -EINVAL;
		goto fail_alloc;
	}
	ret = btrfs_init_workqueues(fs_info, fs_devices);
	if (ret) {
		err = ret;
		goto fail_sb_buffer;
	}
2958 sb->s_bdi->congested_fn = btrfs_congested_fn;
2959 sb->s_bdi->congested_data = fs_info;
2960 sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
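	/*
	 * Scale the bdi readahead window with the number of devices, but
	 * never let it drop below a 4MiB floor.
	 */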
2961 sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
2962 sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
2963 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
2965 sb->s_blocksize = sectorsize;
2966 sb->s_blocksize_bits = blksize_bits(sectorsize);
2967 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
2969 mutex_lock(&fs_info->chunk_mutex);
2970 ret = btrfs_read_sys_array(fs_info);
2971 mutex_unlock(&fs_info->chunk_mutex);
2973 btrfs_err(fs_info, "failed to read the system array: %d", ret);
2974 goto fail_sb_buffer;
2977 generation = btrfs_super_chunk_root_generation(disk_super);
2978 level = btrfs_super_chunk_root_level(disk_super);
2980 __setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2982 chunk_root->node = read_tree_block(fs_info,
2983 btrfs_super_chunk_root(disk_super),
2984 generation, level, NULL);
2985 if (IS_ERR(chunk_root->node) ||
2986 !extent_buffer_uptodate(chunk_root->node)) {
2987 btrfs_err(fs_info, "failed to read chunk root");
2988 if (!IS_ERR(chunk_root->node))
2989 free_extent_buffer(chunk_root->node);
2990 chunk_root->node = NULL;
2991 goto fail_tree_roots;
2993 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2994 chunk_root->commit_root = btrfs_root_node(chunk_root);
2996 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2997 btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2999 ret = btrfs_read_chunk_tree(fs_info);
3001 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3002 goto fail_tree_roots;
3006 * Keep the devid that is marked to be the target device for the
3007 * device replace procedure
3009 btrfs_free_extra_devids(fs_devices, 0);
3011 if (!fs_devices->latest_bdev) {
3012 btrfs_err(fs_info, "failed to read devices");
3013 goto fail_tree_roots;
3017 generation = btrfs_super_generation(disk_super);
3018 level = btrfs_super_root_level(disk_super);
3020 tree_root->node = read_tree_block(fs_info,
3021 btrfs_super_root(disk_super),
3022 generation, level, NULL);
3023 if (IS_ERR(tree_root->node) ||
3024 !extent_buffer_uptodate(tree_root->node)) {
3025 btrfs_warn(fs_info, "failed to read tree root");
3026 if (!IS_ERR(tree_root->node))
3027 free_extent_buffer(tree_root->node);
3028 tree_root->node = NULL;
3029 goto recovery_tree_root;
3032 btrfs_set_root_node(&tree_root->root_item, tree_root->node);
3033 tree_root->commit_root = btrfs_root_node(tree_root);
3034 btrfs_set_root_refs(&tree_root->root_item, 1);
3036 mutex_lock(&tree_root->objectid_mutex);
3037 ret = btrfs_find_highest_objectid(tree_root,
3038 &tree_root->highest_objectid);
3040 mutex_unlock(&tree_root->objectid_mutex);
3041 goto recovery_tree_root;
3044 ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
3046 mutex_unlock(&tree_root->objectid_mutex);
3048 ret = btrfs_read_roots(fs_info);
3050 goto recovery_tree_root;
3052 fs_info->generation = generation;
3053 fs_info->last_trans_committed = generation;
3055 ret = btrfs_verify_dev_extents(fs_info);
3058 "failed to verify dev extents against chunks: %d",
3060 goto fail_block_groups;
3062 ret = btrfs_recover_balance(fs_info);
3064 btrfs_err(fs_info, "failed to recover balance: %d", ret);
3065 goto fail_block_groups;
3068 ret = btrfs_init_dev_stats(fs_info);
3070 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3071 goto fail_block_groups;
3074 ret = btrfs_init_dev_replace(fs_info);
3076 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3077 goto fail_block_groups;
3080 btrfs_free_extra_devids(fs_devices, 1);
3082 ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
3084 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3086 goto fail_block_groups;
3089 ret = btrfs_sysfs_add_device(fs_devices);
3091 btrfs_err(fs_info, "failed to init sysfs device interface: %d",
3093 goto fail_fsdev_sysfs;
3096 ret = btrfs_sysfs_add_mounted(fs_info);
3098 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3099 goto fail_fsdev_sysfs;
3102 ret = btrfs_init_space_info(fs_info);
3104 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3108 ret = btrfs_read_block_groups(fs_info);
3110 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3114 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
		btrfs_warn(fs_info,
		"writable mount is not allowed due to too many missing devices");
		err = -EINVAL;
		goto fail_sysfs;
	}
3120 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
3122 if (IS_ERR(fs_info->cleaner_kthread))
3125 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3127 "btrfs-transaction");
3128 if (IS_ERR(fs_info->transaction_kthread))
3131 if (!btrfs_test_opt(fs_info, NOSSD) &&
3132 !fs_info->fs_devices->rotating) {
3133 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3137 * Mount does not set all options immediately, we can do it now and do
3138 * not have to wait for transaction commit
3140 btrfs_apply_pending_changes(fs_info);
3142 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3143 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
3144 ret = btrfsic_mount(fs_info, fs_devices,
3145 btrfs_test_opt(fs_info,
3146 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
3148 fs_info->check_integrity_print_mask);
		if (ret)
			btrfs_warn(fs_info,
				"failed to initialize integrity check module: %d",
				ret);
	}
#endif
3155 ret = btrfs_read_qgroup_config(fs_info);
3157 goto fail_trans_kthread;
3159 if (btrfs_build_ref_tree(fs_info))
3160 btrfs_err(fs_info, "couldn't build ref tree");
	/* Do not make disk changes in a broken FS or when nologreplay is given */
3163 if (btrfs_super_log_root(disk_super) != 0 &&
3164 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3165 ret = btrfs_replay_log(fs_info, fs_devices);
3172 ret = btrfs_find_orphan_roots(fs_info);
3176 if (!sb_rdonly(sb)) {
3177 ret = btrfs_cleanup_fs_roots(fs_info);
3181 mutex_lock(&fs_info->cleaner_mutex);
3182 ret = btrfs_recover_relocation(tree_root);
3183 mutex_unlock(&fs_info->cleaner_mutex);
3185 btrfs_warn(fs_info, "failed to recover relocation: %d",
3192 location.objectid = BTRFS_FS_TREE_OBJECTID;
3193 location.type = BTRFS_ROOT_ITEM_KEY;
3194 location.offset = 0;
3196 fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
3197 if (IS_ERR(fs_info->fs_root)) {
3198 err = PTR_ERR(fs_info->fs_root);
3199 btrfs_warn(fs_info, "failed to read fs tree: %d", err);
3206 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
3207 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3208 clear_free_space_tree = 1;
3209 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
3210 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
3211 btrfs_warn(fs_info, "free space tree is invalid");
3212 clear_free_space_tree = 1;
3215 if (clear_free_space_tree) {
3216 btrfs_info(fs_info, "clearing free space tree");
3217 ret = btrfs_clear_free_space_tree(fs_info);
3220 "failed to clear free space tree: %d", ret);
3221 close_ctree(fs_info);
3226 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3227 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3228 btrfs_info(fs_info, "creating free space tree");
3229 ret = btrfs_create_free_space_tree(fs_info);
3232 "failed to create free space tree: %d", ret);
3233 close_ctree(fs_info);
3238 down_read(&fs_info->cleanup_work_sem);
3239 if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
3240 (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
3241 up_read(&fs_info->cleanup_work_sem);
3242 close_ctree(fs_info);
3245 up_read(&fs_info->cleanup_work_sem);
3247 ret = btrfs_resume_balance_async(fs_info);
3249 btrfs_warn(fs_info, "failed to resume balance: %d", ret);
3250 close_ctree(fs_info);
3254 ret = btrfs_resume_dev_replace_async(fs_info);
3256 btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
3257 close_ctree(fs_info);
3261 btrfs_qgroup_rescan_resume(fs_info);
3263 if (!fs_info->uuid_root) {
3264 btrfs_info(fs_info, "creating UUID tree");
3265 ret = btrfs_create_uuid_tree(fs_info);
3268 "failed to create the UUID tree: %d", ret);
3269 close_ctree(fs_info);
3272 } else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3273 fs_info->generation !=
3274 btrfs_super_uuid_tree_generation(disk_super)) {
3275 btrfs_info(fs_info, "checking UUID tree");
3276 ret = btrfs_check_uuid_tree(fs_info);
3279 "failed to check the UUID tree: %d", ret);
3280 close_ctree(fs_info);
3284 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
3286 set_bit(BTRFS_FS_OPEN, &fs_info->flags);
	/*
	 * backuproot only affects mount behavior, and if open_ctree
	 * succeeded, there is no need to keep the flag.
	 */
3292 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
3297 btrfs_free_qgroup_config(fs_info);
3299 kthread_stop(fs_info->transaction_kthread);
3300 btrfs_cleanup_transaction(fs_info);
3301 btrfs_free_fs_roots(fs_info);
3303 kthread_stop(fs_info->cleaner_kthread);
	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads.
	 */
3309 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
3312 btrfs_sysfs_remove_mounted(fs_info);
3315 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
3318 btrfs_put_block_group_cache(fs_info);
3321 free_root_pointers(fs_info, 1);
3322 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3325 btrfs_stop_all_workers(fs_info);
3326 btrfs_free_block_groups(fs_info);
3329 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3331 iput(fs_info->btree_inode);
3333 percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
3334 fail_delalloc_bytes:
3335 percpu_counter_destroy(&fs_info->delalloc_bytes);
3336 fail_dirty_metadata_bytes:
3337 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3339 cleanup_srcu_struct(&fs_info->subvol_srcu);
3341 btrfs_free_stripe_hash_table(fs_info);
3342 btrfs_close_devices(fs_info->fs_devices);
3346 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
3347 goto fail_tree_roots;
3349 free_root_pointers(fs_info, 0);
3351 /* don't use the log in recovery mode, it won't be valid */
3352 btrfs_set_super_log_root(disk_super, 0);
3354 /* we can't trust the free space cache either */
3355 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3357 ret = next_root_backup(fs_info, fs_info->super_copy,
3358 &num_backups_tried, &backup_index);
3360 goto fail_block_groups;
3361 goto retry_root_backup;
3363 ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;
3373 btrfs_warn_rl_in_rcu(device->fs_info,
3374 "lost page write due to IO error on %s",
3375 rcu_str_deref(device->name));
3376 /* note, we don't set_buffer_write_io_error because we have
3377 * our own ways of dealing with the IO errors
3379 clear_buffer_uptodate(bh);
3380 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3386 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
3387 struct buffer_head **bh_ret)
3389 struct buffer_head *bh;
3390 struct btrfs_super_block *super;
3393 bytenr = btrfs_sb_offset(copy_num);
3394 if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
3397 bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
3399 * If we fail to read from the underlying devices, as of now
3400 * the best option we have is to mark it EIO.
3405 super = (struct btrfs_super_block *)bh->b_data;
3406 if (btrfs_super_bytenr(super) != bytenr ||
3407 btrfs_super_magic(super) != BTRFS_MAGIC) {
3417 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3419 struct buffer_head *bh;
3420 struct buffer_head *latest = NULL;
3421 struct btrfs_super_block *super;
3426 /* we would like to check all the supers, but that would make
3427 * a btrfs mount succeed after a mkfs from a different FS.
3428 * So, we need to add a special mount option to scan for
3429 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
3431 for (i = 0; i < 1; i++) {
3432 ret = btrfs_read_dev_one_super(bdev, i, &bh);
3436 super = (struct btrfs_super_block *)bh->b_data;
3438 if (!latest || btrfs_super_generation(super) > transid) {
3441 transid = btrfs_super_generation(super);
3448 return ERR_PTR(ret);
3454 * Write superblock @sb to the @device. Do not wait for completion, all the
3455 * buffer heads we write are pinned.
 * Write @max_mirrors copies of the superblock, where 0 means default that
 * fits the expected device size at commit time. Note that max_mirrors must
 * be the same for the write and wait phases.
3461 * Return number of errors when buffer head is not found or submission fails.
3463 static int write_dev_supers(struct btrfs_device *device,
3464 struct btrfs_super_block *sb, int max_mirrors)
3466 struct buffer_head *bh;
3474 if (max_mirrors == 0)
3475 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3477 for (i = 0; i < max_mirrors; i++) {
3478 bytenr = btrfs_sb_offset(i);
3479 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3480 device->commit_total_bytes)
3483 btrfs_set_super_bytenr(sb, bytenr);
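		/*
		 * The checksum covers everything after the csum field
		 * itself, over the rest of the BTRFS_SUPER_INFO_SIZE block.
		 */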
		crc = ~(u32)0;
		crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);
3490 /* One reference for us, and we leave it for the caller */
3491 bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
3492 BTRFS_SUPER_INFO_SIZE);
3494 btrfs_err(device->fs_info,
3495 "couldn't get super buffer head for bytenr %llu",
3501 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3503 /* one reference for submit_bh */
3506 set_buffer_uptodate(bh);
3508 bh->b_end_io = btrfs_end_buffer_write_sync;
3509 bh->b_private = device;
		/*
		 * we fua the first super. The others we allow to go down
		 * lazily.
		 */
3515 op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3516 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3517 op_flags |= REQ_FUA;
3518 ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3522 return errors < i ? 0 : -1;
3526 * Wait for write completion of superblocks done by write_dev_supers,
3527 * @max_mirrors same for write and wait phases.
 * Return number of errors when buffer head is not found or not marked up to
 * date.
 */
3532 static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3534 struct buffer_head *bh;
3537 bool primary_failed = false;
3540 if (max_mirrors == 0)
3541 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3543 for (i = 0; i < max_mirrors; i++) {
3544 bytenr = btrfs_sb_offset(i);
3545 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3546 device->commit_total_bytes)
3549 bh = __find_get_block(device->bdev,
3550 bytenr / BTRFS_BDEV_BLOCKSIZE,
3551 BTRFS_SUPER_INFO_SIZE);
3555 primary_failed = true;
3559 if (!buffer_uptodate(bh)) {
3562 primary_failed = true;
3565 /* drop our reference */
3568 /* drop the reference from the writing run */
3572 /* log error, force error return */
3573 if (primary_failed) {
3574 btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3579 return errors < i ? 0 : -1;
3583 * endio for the write_dev_flush, this will wake anyone waiting
3584 * for the barrier when it is done
3586 static void btrfs_end_empty_barrier(struct bio *bio)
3588 complete(bio->bi_private);
3592 * Submit a flush request to the device if it supports it. Error handling is
3593 * done in the waiting counterpart.
3595 static void write_dev_flush(struct btrfs_device *device)
3597 struct request_queue *q = bdev_get_queue(device->bdev);
3598 struct bio *bio = device->flush_bio;
	if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return;

	bio_reset(bio);
3604 bio->bi_end_io = btrfs_end_empty_barrier;
3605 bio_set_dev(bio, device->bdev);
3606 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3607 init_completion(&device->flush_wait);
3608 bio->bi_private = &device->flush_wait;
3610 btrfsic_submit_bio(bio);
3611 set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3615 * If the flush bio has been submitted by write_dev_flush, wait for it.
3617 static blk_status_t wait_dev_flush(struct btrfs_device *device)
3619 struct bio *bio = device->flush_bio;
	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
		return BLK_STS_OK;
3624 clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
3625 wait_for_completion_io(&device->flush_wait);
3627 return bio->bi_status;
static int check_barrier_error(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_check_rw_degradable(fs_info, NULL))
		return -EIO;
	return 0;
}

/*
3638 * send an empty flush down to each device in parallel,
3639 * then wait for them
3641 static int barrier_all_devices(struct btrfs_fs_info *info)
3643 struct list_head *head;
3644 struct btrfs_device *dev;
3645 int errors_wait = 0;
3648 lockdep_assert_held(&info->fs_devices->device_list_mutex);
3649 /* send down all the barriers */
3650 head = &info->fs_devices->devices;
3651 list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev)
			continue;
3656 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3657 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3660 write_dev_flush(dev);
3661 dev->last_flush_error = BLK_STS_OK;
3664 /* wait for all the barriers */
3665 list_for_each_entry(dev, head, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
3672 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3673 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
		ret = wait_dev_flush(dev);
		if (ret) {
			dev->last_flush_error = ret;
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_FLUSH_ERRS);
			errors_wait++;
		}
	}

	if (errors_wait) {
		/*
3687 * At some point we need the status of all disks
3688 * to arrive at the volume status. So error checking
3689 * is being pushed to a separate loop.
		return check_barrier_error(info);
	}

	return 0;
}
3696 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3699 int min_tolerated = INT_MAX;
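	/*
	 * Take the minimum, over every profile present in @flags, of the
	 * number of device failures that profile tolerates, e.g. RAID1
	 * survives one missing device and RAID6 survives two.
	 */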
3701 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
3702 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
3703 min_tolerated = min(min_tolerated,
3704 btrfs_raid_array[BTRFS_RAID_SINGLE].
3705 tolerated_failures);
3707 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3708 if (raid_type == BTRFS_RAID_SINGLE)
3710 if (!(flags & btrfs_raid_array[raid_type].bg_flag))
3712 min_tolerated = min(min_tolerated,
3713 btrfs_raid_array[raid_type].
3714 tolerated_failures);
3717 if (min_tolerated == INT_MAX) {
3718 pr_warn("BTRFS: unknown raid flag: %llu", flags);
3722 return min_tolerated;
3725 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3727 struct list_head *head;
3728 struct btrfs_device *dev;
3729 struct btrfs_super_block *sb;
3730 struct btrfs_dev_item *dev_item;
3734 int total_errors = 0;
3737 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
	/*
	 * max_mirrors == 0 indicates we're from commit_transaction,
	 * not from fsync where the tree roots in fs_info have not
	 * been consistent on disk.
	 */
3744 if (max_mirrors == 0)
3745 backup_super_roots(fs_info);
3747 sb = fs_info->super_for_commit;
3748 dev_item = &sb->dev_item;
3750 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3751 head = &fs_info->fs_devices->devices;
3752 max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
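	/*
	 * Writing the supers is two-phase: after the barriers below, one
	 * device loop submits the super block writes and a second loop waits
	 * for them, so all devices are written in parallel.
	 */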
3755 ret = barrier_all_devices(fs_info);
3758 &fs_info->fs_devices->device_list_mutex);
3759 btrfs_handle_fs_error(fs_info, ret,
3760 "errors while submitting device barriers.");
3765 list_for_each_entry(dev, head, dev_list) {
3770 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3771 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3774 btrfs_set_stack_device_generation(dev_item, 0);
3775 btrfs_set_stack_device_type(dev_item, dev->type);
3776 btrfs_set_stack_device_id(dev_item, dev->devid);
3777 btrfs_set_stack_device_total_bytes(dev_item,
3778 dev->commit_total_bytes);
3779 btrfs_set_stack_device_bytes_used(dev_item,
3780 dev->commit_bytes_used);
3781 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3782 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3783 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3784 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3785 memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
3788 flags = btrfs_super_flags(sb);
3789 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3791 ret = btrfs_validate_write_super(fs_info, sb);
3793 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3794 btrfs_handle_fs_error(fs_info, -EUCLEAN,
3795 "unexpected superblock corruption detected");
3799 ret = write_dev_supers(dev, sb, max_mirrors);
3803 if (total_errors > max_errors) {
3804 btrfs_err(fs_info, "%d errors while writing supers",
3806 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3808 /* FUA is masked off if unsupported and can't be the reason */
3809 btrfs_handle_fs_error(fs_info, -EIO,
3810 "%d errors while writing supers",
3816 list_for_each_entry(dev, head, dev_list) {
3819 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3820 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3823 ret = wait_dev_supers(dev, max_mirrors);
3827 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3828 if (total_errors > max_errors) {
3829 btrfs_handle_fs_error(fs_info, -EIO,
3830 "%d errors while writing supers",
3837 /* Drop a fs root from the radix tree and free it. */
3838 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3839 struct btrfs_root *root)
3841 spin_lock(&fs_info->fs_roots_radix_lock);
3842 radix_tree_delete(&fs_info->fs_roots_radix,
3843 (unsigned long)root->root_key.objectid);
3844 spin_unlock(&fs_info->fs_roots_radix_lock);
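	/*
	 * If this root is no longer referenced, wait out any in-flight SRCU
	 * readers before tearing its state down.
	 */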
3846 if (btrfs_root_refs(&root->root_item) == 0)
3847 synchronize_srcu(&fs_info->subvol_srcu);
3849 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
3850 btrfs_free_log(NULL, root);
3851 if (root->reloc_root) {
3852 free_extent_buffer(root->reloc_root->node);
3853 free_extent_buffer(root->reloc_root->commit_root);
3854 btrfs_put_fs_root(root->reloc_root);
3855 root->reloc_root = NULL;
3859 if (root->free_ino_pinned)
3860 __btrfs_remove_free_space_cache(root->free_ino_pinned);
3861 if (root->free_ino_ctl)
3862 __btrfs_remove_free_space_cache(root->free_ino_ctl);
3863 btrfs_free_fs_root(root);
3866 void btrfs_free_fs_root(struct btrfs_root *root)
3868 iput(root->ino_cache_inode);
3869 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3871 free_anon_bdev(root->anon_dev);
3872 if (root->subv_writers)
3873 btrfs_free_subvolume_writers(root->subv_writers);
3874 free_extent_buffer(root->node);
3875 free_extent_buffer(root->commit_root);
3876 kfree(root->free_ino_ctl);
3877 kfree(root->free_ino_pinned);
3878 btrfs_put_fs_root(root);
3881 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3883 u64 root_objectid = 0;
3884 struct btrfs_root *gang[8];
3887 unsigned int ret = 0;
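	/*
	 * Walk the fs roots radix tree in gangs of 8: grab a reference on
	 * each live root under the SRCU read lock, then run orphan cleanup
	 * on them outside of it.
	 */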
3891 index = srcu_read_lock(&fs_info->subvol_srcu);
3892 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3893 (void **)gang, root_objectid,
3896 srcu_read_unlock(&fs_info->subvol_srcu, index);
3899 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3901 for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots in dead_roots */
3903 if (btrfs_root_refs(&gang[i]->root_item) == 0) {
			/* grab all the search results for later use */
3908 gang[i] = btrfs_grab_fs_root(gang[i]);
3910 srcu_read_unlock(&fs_info->subvol_srcu, index);
3912 for (i = 0; i < ret; i++) {
3915 root_objectid = gang[i]->root_key.objectid;
3916 err = btrfs_orphan_cleanup(gang[i]);
3919 btrfs_put_fs_root(gang[i]);
3924 /* release the uncleaned roots due to error */
3925 for (; i < ret; i++) {
3927 btrfs_put_fs_root(gang[i]);
3932 int btrfs_commit_super(struct btrfs_fs_info *fs_info)
3934 struct btrfs_root *root = fs_info->tree_root;
3935 struct btrfs_trans_handle *trans;
3937 mutex_lock(&fs_info->cleaner_mutex);
3938 btrfs_run_delayed_iputs(fs_info);
3939 mutex_unlock(&fs_info->cleaner_mutex);
3940 wake_up_process(fs_info->cleaner_kthread);
	/* wait until the ongoing cleanup work is done */
3943 down_write(&fs_info->cleanup_work_sem);
3944 up_write(&fs_info->cleanup_work_sem);
3946 trans = btrfs_join_transaction(root);
3948 return PTR_ERR(trans);
3949 return btrfs_commit_transaction(trans);
void close_ctree(struct btrfs_fs_info *fs_info)
{
	int ret;
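	/*
	 * Teardown order: stop background activity first (cleaner, qgroup
	 * rescan, balance, dev-replace, scrub, defrag), commit if the fs is
	 * writable, then stop the kthreads and free everything else.
	 */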
3956 set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
3958 * We don't want the cleaner to start new transactions, add more delayed
3959 * iputs, etc. while we're closing. We can't use kthread_stop() yet
3960 * because that frees the task_struct, and the transaction kthread might
3961 * still try to wake up the cleaner.
3963 kthread_park(fs_info->cleaner_kthread);
3965 /* wait for the qgroup rescan worker to stop */
3966 btrfs_qgroup_wait_for_completion(fs_info, false);
3968 /* wait for the uuid_scan task to finish */
3969 down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
3971 up(&fs_info->uuid_tree_rescan_sem);
3973 /* pause restriper - we want to resume on mount */
3974 btrfs_pause_balance(fs_info);
3976 btrfs_dev_replace_suspend_for_unmount(fs_info);
3978 btrfs_scrub_cancel(fs_info);
3980 /* wait for any defraggers to finish */
3981 wait_event(fs_info->transaction_wait,
3982 (atomic_read(&fs_info->defrag_running) == 0));
3984 /* clear out the rbtree of defraggable inodes */
3985 btrfs_cleanup_defrag_inodes(fs_info);
3987 cancel_work_sync(&fs_info->async_reclaim_work);
3989 if (!sb_rdonly(fs_info->sb)) {
3991 * The cleaner kthread is stopped, so do one final pass over
3992 * unused block groups.
3994 btrfs_delete_unused_bgs(fs_info);
3996 ret = btrfs_commit_super(fs_info);
3998 btrfs_err(fs_info, "commit super ret %d", ret);
4001 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
4002 test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
4003 btrfs_error_commit_super(fs_info);
4005 kthread_stop(fs_info->transaction_kthread);
4006 kthread_stop(fs_info->cleaner_kthread);
4008 ASSERT(list_empty(&fs_info->delayed_iputs));
4009 set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
4011 btrfs_free_qgroup_config(fs_info);
4012 ASSERT(list_empty(&fs_info->delalloc_roots));
4014 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
4015 btrfs_info(fs_info, "at unmount delalloc count %lld",
4016 percpu_counter_sum(&fs_info->delalloc_bytes));
4019 btrfs_sysfs_remove_mounted(fs_info);
4020 btrfs_sysfs_remove_fsid(fs_info->fs_devices);
4022 btrfs_free_fs_roots(fs_info);
4024 btrfs_put_block_group_cache(fs_info);
	/*
	 * We must make sure there are no read requests to submit after we
	 * stop all workers.
	 */
4030 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
4031 btrfs_stop_all_workers(fs_info);
4033 btrfs_free_block_groups(fs_info);
4035 clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
4036 free_root_pointers(fs_info, 1);
4038 iput(fs_info->btree_inode);
4040 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4041 if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
4042 btrfsic_unmount(fs_info->fs_devices);
4045 btrfs_close_devices(fs_info->fs_devices);
4046 btrfs_mapping_tree_free(&fs_info->mapping_tree);
4048 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
4049 percpu_counter_destroy(&fs_info->delalloc_bytes);
4050 percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
4051 cleanup_srcu_struct(&fs_info->subvol_srcu);
4053 btrfs_free_stripe_hash_table(fs_info);
4054 btrfs_free_ref_cache(fs_info);
4056 while (!list_empty(&fs_info->pinned_chunks)) {
4057 struct extent_map *em;
4059 em = list_first_entry(&fs_info->pinned_chunks,
4060 struct extent_map, list);
4061 list_del_init(&em->list);
4062 free_extent_map(em);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
4070 struct inode *btree_inode = buf->pages[0]->mapping->host;
	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}
4083 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
4085 struct btrfs_fs_info *fs_info;
4086 struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	bool was_dirty;
4090 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4092 * This is a fast path so only do this check if we have sanity tests
4093 * enabled. Normal people shouldn't be using unmapped buffers as dirty
4094 * outside of the sanity tests.
	if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
		return;
#endif
4099 root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4100 fs_info = root->fs_info;
4101 btrfs_assert_tree_locked(buf);
4102 if (transid != fs_info->generation)
4103 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
4104 buf->start, transid, fs_info->generation);
4105 was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
					 buf->len,
					 fs_info->dirty_metadata_batch);
4110 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
4112 * Since btrfs_mark_buffer_dirty() can be called with item pointer set
4113 * but item data not updated.
4114 * So here we should only check item pointers, not item data.
4116 if (btrfs_header_level(buf) == 0 &&
4117 btrfs_check_leaf_relaxed(fs_info, buf)) {
4118 btrfs_print_leaf(buf);
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code; they end up stuck in balance_dirty_pages forever.
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(fs_info);
4139 ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
4140 BTRFS_DIRTY_METADATA_THRESH,
4141 fs_info->dirty_metadata_batch);
	if (ret > 0)
		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
}
4147 void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4149 __btrfs_btree_balance_dirty(fs_info, 1);
4152 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4154 __btrfs_btree_balance_dirty(fs_info, 0);
4157 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid, int level,
4158 struct btrfs_key *first_key)
4160 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
4161 struct btrfs_fs_info *fs_info = root->fs_info;
	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
					      level, first_key);
}
4167 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4169 /* cleanup FS via transaction */
4170 btrfs_cleanup_transaction(fs_info);
4172 mutex_lock(&fs_info->cleaner_mutex);
4173 btrfs_run_delayed_iputs(fs_info);
4174 mutex_unlock(&fs_info->cleaner_mutex);
4176 down_write(&fs_info->cleanup_work_sem);
4177 up_write(&fs_info->cleanup_work_sem);
4180 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4182 struct btrfs_ordered_extent *ordered;
4184 spin_lock(&root->ordered_extent_lock);
4186 * This will just short circuit the ordered completion stuff which will
4187 * make sure the ordered extent gets properly cleaned up.
4189 list_for_each_entry(ordered, &root->ordered_extents,
4191 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
4192 spin_unlock(&root->ordered_extent_lock);
4195 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4197 struct btrfs_root *root;
4198 struct list_head splice;
4200 INIT_LIST_HEAD(&splice);
4202 spin_lock(&fs_info->ordered_root_lock);
4203 list_splice_init(&fs_info->ordered_roots, &splice);
4204 while (!list_empty(&splice)) {
4205 root = list_first_entry(&splice, struct btrfs_root,
4207 list_move_tail(&root->ordered_root,
4208 &fs_info->ordered_roots);
4210 spin_unlock(&fs_info->ordered_root_lock);
4211 btrfs_destroy_ordered_extents(root);
4214 spin_lock(&fs_info->ordered_root_lock);
4216 spin_unlock(&fs_info->ordered_root_lock);
4219 * We need this here because if we've been flipped read-only we won't
4220 * get sync() from the umount, so we need to make sure any ordered
4221 * extents that haven't had their dirty pages IO start writeout yet
4222 * actually get run and error out properly.
4224 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4227 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4228 struct btrfs_fs_info *fs_info)
4230 struct rb_node *node;
4231 struct btrfs_delayed_ref_root *delayed_refs;
4232 struct btrfs_delayed_ref_node *ref;
4235 delayed_refs = &trans->delayed_refs;
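	/*
	 * Abort path: drop every delayed ref head and its refs. Heads that
	 * still had must_insert_reserved set get their extents pinned so the
	 * reserved space is not leaked.
	 */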
4237 spin_lock(&delayed_refs->lock);
4238 if (atomic_read(&delayed_refs->num_entries) == 0) {
4239 spin_unlock(&delayed_refs->lock);
4240 btrfs_info(fs_info, "delayed_refs has NO entry");
4244 while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4245 struct btrfs_delayed_ref_head *head;
4247 bool pin_bytes = false;
4249 head = rb_entry(node, struct btrfs_delayed_ref_head,
4251 if (btrfs_delayed_ref_lock(delayed_refs, head))
4254 spin_lock(&head->lock);
4255 while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4256 ref = rb_entry(n, struct btrfs_delayed_ref_node,
4259 rb_erase_cached(&ref->ref_node, &head->ref_tree);
4260 RB_CLEAR_NODE(&ref->ref_node);
4261 if (!list_empty(&ref->add_list))
4262 list_del(&ref->add_list);
4263 atomic_dec(&delayed_refs->num_entries);
4264 btrfs_put_delayed_ref(ref);
4266 if (head->must_insert_reserved)
4268 btrfs_free_delayed_extent_op(head->extent_op);
4269 btrfs_delete_ref_head(delayed_refs, head);
4270 spin_unlock(&head->lock);
4271 spin_unlock(&delayed_refs->lock);
4272 mutex_unlock(&head->mutex);
4275 btrfs_pin_extent(fs_info, head->bytenr,
4276 head->num_bytes, 1);
4277 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4278 btrfs_put_delayed_ref_head(head);
4280 spin_lock(&delayed_refs->lock);
4283 spin_unlock(&delayed_refs->lock);
4288 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4290 struct btrfs_inode *btrfs_inode;
4291 struct list_head splice;
4293 INIT_LIST_HEAD(&splice);
4295 spin_lock(&root->delalloc_lock);
4296 list_splice_init(&root->delalloc_inodes, &splice);
4298 while (!list_empty(&splice)) {
4299 struct inode *inode = NULL;
4300 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4302 __btrfs_del_delalloc_inode(root, btrfs_inode);
4303 spin_unlock(&root->delalloc_lock);
		/*
		 * Make sure we get a live inode and that it'll not disappear
		 * meanwhile.
		 */
4309 inode = igrab(&btrfs_inode->vfs_inode);
4311 invalidate_inode_pages2(inode->i_mapping);
4314 spin_lock(&root->delalloc_lock);
4316 spin_unlock(&root->delalloc_lock);
4319 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4321 struct btrfs_root *root;
4322 struct list_head splice;
4324 INIT_LIST_HEAD(&splice);
4326 spin_lock(&fs_info->delalloc_root_lock);
4327 list_splice_init(&fs_info->delalloc_roots, &splice);
4328 while (!list_empty(&splice)) {
4329 root = list_first_entry(&splice, struct btrfs_root,
4331 root = btrfs_grab_fs_root(root);
4333 spin_unlock(&fs_info->delalloc_root_lock);
4335 btrfs_destroy_delalloc_inodes(root);
4336 btrfs_put_fs_root(root);
4338 spin_lock(&fs_info->delalloc_root_lock);
4340 spin_unlock(&fs_info->delalloc_root_lock);
4343 static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4344 struct extent_io_tree *dirty_pages,
4348 struct extent_buffer *eb;
4353 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
4358 clear_extent_bits(dirty_pages, start, end, mark);
4359 while (start <= end) {
4360 eb = find_extent_buffer(fs_info, start);
4361 start += fs_info->nodesize;
4364 wait_on_extent_buffer_writeback(eb);
4366 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
4368 clear_extent_buffer_dirty(eb);
4369 free_extent_buffer_stale(eb);
4376 static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4377 struct extent_io_tree *pinned_extents)
4379 struct extent_io_tree *unpin;
4385 unpin = pinned_extents;
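	/*
	 * fs_info->pinned_extents points at one of the two freed_extents
	 * trees; drain that one first, then flip over and drain the other.
	 */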
4388 struct extent_state *cached_state = NULL;
		/*
		 * The btrfs_finish_extent_commit() may get the same range as
		 * ours between find_first_extent_bit and clear_extent_dirty.
		 * Hence, hold the unused_bg_unpin_mutex to avoid double
		 * unpinning the same extent range.
		 */
4396 mutex_lock(&fs_info->unused_bg_unpin_mutex);
4397 ret = find_first_extent_bit(unpin, 0, &start, &end,
4398 EXTENT_DIRTY, &cached_state);
4400 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4404 clear_extent_dirty(unpin, start, end, &cached_state);
4405 free_extent_state(cached_state);
4406 btrfs_error_unpin_extent_range(fs_info, start, end);
4407 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
4412 if (unpin == &fs_info->freed_extents[0])
4413 unpin = &fs_info->freed_extents[1];
4415 unpin = &fs_info->freed_extents[0];
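/*
 * Release the free-space-cache io_ctl inode of a block group that was
 * caught with I/O in flight, then drop the reference on the block group.
 */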
static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
{
	struct inode *inode;

	inode = cache->io_ctl.inode;
	if (inode) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		cache->io_ctl.inode = NULL;
		iput(inode);
	}
	btrfs_put_block_group(cache);
}

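/*
 * Empty the transaction's dirty_bgs and io_bgs lists, marking the disk
 * cache state of each block group BTRFS_DC_ERROR so its free space cache
 * is not written out.
 */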
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;

	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_cleanup_bg_io(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		list_del_init(&cache->dirty_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);

		spin_unlock(&cur_trans->dirty_bgs_lock);
		btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of the io_bgs member for details on why
	 * it's safe to use it without any locking.
	 */
	while (!list_empty(&cur_trans->io_bgs)) {
		cache = list_first_entry(&cur_trans->io_bgs,
					 struct btrfs_block_group_cache,
					 io_list);

		list_del_init(&cache->io_list);
		spin_lock(&cache->lock);
		cache->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&cache->lock);
		btrfs_cleanup_bg_io(cache);
	}
}

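/*
 * Tear down the remaining state of a single aborted transaction: dirty
 * block groups, delayed refs, delayed inodes, and dirty/pinned extents.
 * The state transitions mirror a normal commit so that any waiters
 * blocked on this transaction get woken up.
 */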
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_fs_info *fs_info)
{
	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));

	btrfs_destroy_delayed_refs(cur_trans, fs_info);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);

	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(fs_info,
				    fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);
}

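/*
 * Called when the filesystem is in an error state: wait out transactions
 * that already started committing, clean up every other transaction on
 * trans_list, and then destroy whatever ordered extents, delayed inodes,
 * pinned extents and delalloc inodes are left.
 */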
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *t;

	mutex_lock(&fs_info->transaction_kthread_mutex);

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&fs_info->trans_list)) {
		t = list_first_entry(&fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			refcount_inc(&t->use_count);
			spin_unlock(&fs_info->trans_lock);
			btrfs_wait_for_commit(fs_info, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&fs_info->trans_lock);
			continue;
		}
		if (t == fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't currently
			 * hold a trans handle open for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, fs_info);

		spin_lock(&fs_info->trans_lock);
		if (t == fs_info->running_transaction)
			fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(fs_info->tree_root);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(fs_info);
	btrfs_destroy_delayed_inodes(fs_info);
	btrfs_assert_delayed_root_empty(fs_info);
	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(fs_info);
	mutex_unlock(&fs_info->transaction_kthread_mutex);

	return 0;
}

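/* extent_io callbacks used for btree (metadata) pages */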
static const struct extent_io_ops btree_extent_io_ops = {
	/* mandatory callbacks */
	.submit_bio_hook = btree_submit_bio_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
};