 * Copyright (C) 2007 Oracle. All rights reserved.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
#include <linux/version.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include <linux/workqueue.h>
#include <linux/kthread.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
# include <linux/sched.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "async-thread.h"
#include "ref-cache.h"
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
		printk(KERN_CRIT "btrfs: extent buffer blocknr %llu does not "
		       "match header blocknr %llu\n",
		       (unsigned long long)extent_buffer_blocknr(buf),
		       (unsigned long long)btrfs_header_blocknr(buf));
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
	struct btrfs_fs_info *info;
	struct list_head list;
	struct btrfs_work work;
struct async_submit_bio {
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_hook;
	struct btrfs_work work;
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
				    size_t page_offset, u64 start, u64 len,
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		spin_unlock(&em_tree->lock);
	spin_unlock(&em_tree->lock);
	em = alloc_extent_map(GFP_NOFS);
		em = ERR_PTR(-ENOMEM);
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;
		printk("failed to insert %Lu %Lu -> %Lu into tree\n",
		       em->start, em->len, em->block_start);
		em = lookup_extent_mapping(em_tree, start, len);
			printk("after failing, found %Lu %Lu %Lu\n",
			       em->start, em->len, em->block_start);
			em = lookup_extent_mapping(em_tree, failed_start,
				printk("double failure lookup gives us "
				       "%Lu %Lu -> %Lu\n", em->start,
				       em->len, em->block_start);
	spin_unlock(&em_tree->lock);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
	return btrfs_crc32c(seed, data, len);
void btrfs_csum_final(u32 crc, char *result)
	*(__le32 *)result = ~cpu_to_le32(crc);
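/*
 * csum_tree_block() checksums a tree block that is already mapped into the
 * btree inode's page cache.  Judging from the code below, the first
 * BTRFS_CSUM_SIZE bytes of every tree block are reserved for the checksum
 * and the crc32c covers everything after that.  The callers pass 0 to stamp
 * a fresh checksum into the block and 1 to verify the stored one (only
 * BTRFS_CRC32_SIZE bytes of the result are actually kept in the block).
 */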
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
	char result[BTRFS_CRC32_SIZE];
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	unsigned long map_start;
	unsigned long map_len;
	len = buf->len - offset;
		err = map_private_extent_buffer(buf, offset, 32,
					&map_start, &map_len, KM_USER0);
			printk("failed to map extent buffer! %lu\n",
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
		unmap_extent_buffer(buf, map_token, KM_USER0);
	btrfs_csum_final(crc, result);
	/* FIXME, this is not good */
	if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
		memcpy(&found, result, BTRFS_CRC32_SIZE);
		read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
		printk("btrfs: %s checksum verify failed on %llu "
		       "wanted %X found %X level %d\n",
		       root->fs_info->sb->s_id,
		       buf->start, val, found, btrfs_header_level(buf));
	write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
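/*
 * A cached extent buffer can only be trusted if its generation matches the
 * transid recorded in the parent node's pointer.  If it does not, the cached
 * copy is stale (it was written by a transaction the parent never pointed
 * at) and its uptodate bit is cleared, forcing a re-read from disk.
 */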
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
	if (extent_buffer_uptodate(io_tree, eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
	printk("parent transid verify failed on %llu wanted %llu found %llu\n",
	       (unsigned long long)eb->start,
	       (unsigned long long)parent_transid,
	       (unsigned long long)btrfs_header_generation(eb));
	clear_extent_buffer_uptodate(io_tree, eb);
	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
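/*
 * Read a tree block, retrying across the available mirrors until both the
 * checksum and the expected parent transid check out (or the mirrors run
 * out).  btrfs_num_copies() reports how many copies of this byte range
 * exist, which is what bounds the retry loop.
 */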
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					   struct extent_buffer *eb,
					   u64 start, u64 parent_transid)
	struct extent_io_tree *io_tree;
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
					       btree_get_extent, mirror_num);
		    !verify_parent_transid(io_tree, eb, parent_transid))
		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
		if (mirror_num > num_copies)
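/*
 * Called while a dirty btree page is written back: find the extent buffer
 * that starts on this page, sanity check that the header bytenr matches the
 * page position, mark the header written under hash_lock, and stamp a fresh
 * checksum into the block before it goes to disk.
 */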
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
	len = page->private >> 2;
	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
					     btrfs_header_generation(eb));
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
		       start, found_start, len);
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
	if (!PageUptodate(page)) {
		printk("csum not up to date page %lu\n", page->index);
	found_level = btrfs_header_level(eb);
	spin_lock(&root->fs_info->hash_lock);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
	spin_unlock(&root->fs_info->hash_lock);
	csum_tree_block(root, eb, 0);
	free_extent_buffer(eb);
static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	csum_dirty_buffer(root, page);
int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
	struct extent_io_tree *tree;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (page->private == EXTENT_PAGE_PRIVATE)
	len = page->private >> 2;
	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
	if (memcmp_extent_buffer(eb, root->fs_info->fsid,
				 (unsigned long)btrfs_header_fsid(eb),
		printk("bad fsid on block %Lu\n", eb->start);
	found_level = btrfs_header_level(eb);
	ret = csum_tree_block(root, eb, 1);
	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
	end = eb->start + end - 1;
	free_extent_buffer(eb);
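/*
 * Bio completion runs in interrupt context, so the end_io callback below
 * does as little as possible: it records the error and hands the bio to a
 * worker thread (a separate pool for writes), where end_workqueue_fn() does
 * the real completion work, including checksum verification for reads.
 */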
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
static int end_workqueue_bio(struct bio *bio,
			     unsigned int bytes_done, int err)
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;
	if (bio->bi_rw & (1 << BIO_RW))
		btrfs_queue_worker(&fs_info->endio_write_workers,
		btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;
	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
	int limit = 256 * info->fs_devices->open_devices;
		limit = (limit * 3) / 2;
	if (atomic_read(&info->nr_async_submits) > limit)
	return atomic_read(&info->nr_async_bios) > limit;
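/*
 * Worker-thread side of the async bio submission path: pull the queued
 * async_submit_bio off the work item, drop the in-flight submit counter,
 * clear write congestion on the bdi once we are back under the limit, and
 * finally call the real submit_bio_hook for the bio.
 */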
static void run_one_async_submit(struct btrfs_work *work)
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;
	atomic_dec(&fs_info->nr_async_submits);
	if ((async->bio->bi_rw & (1 << BIO_RW)) &&
	    !btrfs_congested_async(fs_info, 1)) {
		clear_bdi_congested(&fs_info->bdi, WRITE);
	async->submit_bio_hook(async->inode, async->rw, async->bio,
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			extent_submit_bio_hook_t *submit_bio_hook)
	struct async_submit_bio *async;
	async = kmalloc(sizeof(*async), GFP_NOFS);
	async->inode = inode;
	async->mirror_num = mirror_num;
	async->submit_bio_hook = submit_bio_hook;
	async->work.func = run_one_async_submit;
	async->work.flags = 0;
	atomic_inc(&fs_info->nr_async_submits);
	btrfs_queue_worker(&fs_info->workers, &async->work);
static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
	struct btrfs_root *root = BTRFS_I(inode)->root;
	offset = bio->bi_sector << 9;
	 * when we're called for a write, we're already in the async
	 * submission context. Just jump into btrfs_map_bio
	if (rw & (1 << BIO_RW)) {
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
	 * called for a read, do the setup so that checksum validation
	 * can happen in the async kernel threads
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	if (!(rw & (1 << BIO_RW))) {
		return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   __btree_submit_bio_hook);
static int btree_writepage(struct page *page, struct writeback_control *wbc)
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
	return extent_write_full_page(tree, page, btree_get_extent, wbc);
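/*
 * For background (WB_SYNC_NONE) writeback of the btree, skip the flush
 * entirely unless enough metadata is actually dirty: pdflush only writes
 * once 96MB is dirty, other callers once 8MB is dirty.  (The likely intent
 * is to avoid trickling small, scattered metadata writes out to disk.)
 */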
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		unsigned long thresh = 96 * 1024 * 1024;
		if (wbc->for_kupdate)
		if (current_is_pdflush()) {
			thresh = 96 * 1024 * 1024;
			thresh = 8 * 1024 * 1024;
		num_dirty = count_range_bits(tree, &start, (u64)-1,
					     thresh, EXTENT_DIRTY);
		if (num_dirty < thresh) {
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
int btree_readpage(struct file *file, struct page *page)
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_state(map, tree, page, gfp_flags);
	ret = try_release_extent_buffer(tree, page);
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
static void btree_invalidatepage(struct page *page, unsigned long offset)
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk("warning page private not zero on page %Lu\n",
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
static int btree_writepage(struct page *page, struct writeback_control *wbc)
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	head = page_buffers(page);
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
static struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepage	= btree_writepage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
	.sync_page	= block_sync_page,
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent, 0);
	free_extent_buffer(buf);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	io_tree = &BTRFS_I(btree_inode)->io_tree;
	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	buf->flags |= EXTENT_UPTODATE;
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		WARN_ON(!btrfs_tree_locked(buf));
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
int wait_on_tree_block_writeback(struct btrfs_root *root,
				 struct extent_buffer *buf)
	struct inode *btree_inode = root->fs_info->btree_inode;
	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
	root->commit_root = NULL;
	root->ref_tree = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->track_dirty = 0;
	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_inode = 0;
	root->last_inode_alloc = 0;
	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->orphan_list);
	INIT_LIST_HEAD(&root->dead_list);
	spin_lock_init(&root->node_lock);
	spin_lock_init(&root->list_lock);
	mutex_init(&root->objectid_mutex);
	btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
	root->ref_tree = &root->ref_tree_struct;
	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->defrag_level = 0;
	root->root_key.objectid = objectid;
static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_root *root)
	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
					       struct btrfs_key *location)
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct extent_buffer *l;
	root = kzalloc(sizeof(*root), GFP_NOFS);
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);
	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	read_extent_buffer(l, &root->root_item,
			   btrfs_item_ptr_offset(l, path->slots[0]),
			   sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
	ret = btrfs_find_highest_inode(root, &highest_inode);
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
	struct btrfs_root *root;
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
	struct btrfs_root *root;
	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	root = btrfs_read_fs_root_no_radix(fs_info, location);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
		free_extent_buffer(root->node);
	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid, root);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
	struct btrfs_root *root;
	root = btrfs_read_fs_root_no_name(fs_info, location);
	ret = btrfs_set_root_name(root, name, namelen);
		free_extent_buffer(root->node);
	ret = btrfs_sysfs_add_root(root);
		free_extent_buffer(root->node);
static int add_hasher(struct btrfs_fs_info *info, char *type) {
	struct btrfs_hasher *hasher;
	hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
	hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
	if (!hasher->hash_tfm) {
	spin_lock(&info->hash_lock);
	list_add(&hasher->list, &info->hashers);
	spin_unlock(&info->hash_lock);
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	struct list_head *cur;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;
	if ((bdi_bits & (1 << BDI_write_congested)) &&
	    btrfs_congested_async(info, 0))
	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
 * this unplugs every device on the box, and it is only used when page
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
	struct list_head *cur;
	struct btrfs_device *device;
	struct btrfs_fs_info *info;
	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn) {
			bdi->unplug_io_fn(bdi, page);
void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
	struct inode *inode;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct address_space *mapping;
	/* the generic O_DIRECT read code does this */
		__unplug_io_fn(bdi, page);
	 * page->mapping may change at any time. Get a consistent copy
	 * and use that for everything below
	mapping = page->mapping;
	inode = mapping->host;
	offset = page_offset(page);
	em_tree = &BTRFS_I(inode)->extent_tree;
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);
		__unplug_io_fn(bdi, page);
	if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
		free_extent_map(em);
		__unplug_io_fn(bdi, page);
	offset = offset - em->start;
	btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
			  em->block_start + offset, page);
	free_extent_map(em);
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->capabilities = default_backing_dev_info.capabilities;
	bdi->unplug_io_fn = btrfs_unplug_io_fn;
	bdi->unplug_io_data = info;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
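/*
 * A metadata block can be larger than a page, in which case a single bio
 * completion may only cover part of it.  Walk the bio's pages and report
 * whether everything needed to checksum the tree block is already up to
 * date in the page cache, so end_workqueue_fn() knows whether it can verify
 * now or must requeue the bio and wait for the rest.
 */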
static int bio_ready_for_csum(struct bio *bio)
	struct extent_io_tree *io_tree = NULL;
	struct btrfs_fs_info *info = NULL;
	struct bio_vec *bvec;
	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page->private == EXTENT_PAGE_PRIVATE) {
			length += bvec->bv_len;
		if (!page->private) {
			length += bvec->bv_len;
		length = bvec->bv_len;
		buf_len = page->private >> 2;
		start = page_offset(page) + bvec->bv_offset;
		io_tree = &BTRFS_I(page->mapping->host)->io_tree;
		info = BTRFS_I(page->mapping->host)->root->fs_info;
	/* are we fully contained in this bio? */
	if (buf_len <= length)
	ret = extent_range_uptodate(io_tree, start + length,
				    start + buf_len - 1);
 * called by the kthread helper functions to finally call the bio end_io
 * functions. This is where read checksum verification actually happens
static void end_workqueue_fn(struct btrfs_work *work)
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;
	/* metadata bios are special because the whole tree block must
	 * be checksummed at once. This makes sure the entire block is in
	 * ram and up to date before trying to verify things. For
	 * blocksize <= pagesize, it is basically a noop
	if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
		btrfs_queue_worker(&fs_info->endio_workers,
	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	bio_endio(bio, bio->bi_size, error);
	bio_endio(bio, error);
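/*
 * Background thread that periodically takes the cleaner_mutex and drops
 * old, deleted snapshots via btrfs_clean_old_snapshots().  It sleeps in
 * TASK_INTERRUPTIBLE between passes and bails out once the filesystem
 * starts closing or kthread_stop() is called.
 */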
static int cleaner_kthread(void *arg)
	struct btrfs_root *root = arg;
		if (root->fs_info->closing)
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->cleaner_mutex);
		btrfs_clean_old_snapshots(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);
		if (freezing(current)) {
			if (root->fs_info->closing)
			set_current_state(TASK_INTERRUPTIBLE);
			__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());
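/*
 * Background thread that forces a transaction commit roughly every 30
 * seconds: if the running transaction is older than that, it starts a
 * handle, commits it, and then wakes the cleaner thread.  Like the cleaner
 * it backs off while the filesystem is frozen or closing.
 */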
static int transaction_kthread(void *arg)
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	unsigned long delay;
		if (root->fs_info->closing)
		vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
		mutex_lock(&root->fs_info->transaction_kthread_mutex);
		if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
			printk("btrfs: total reference cache size %Lu\n",
			       root->fs_info->total_ref_cache_size);
		mutex_lock(&root->fs_info->trans_mutex);
		cur = root->fs_info->running_transaction;
			mutex_unlock(&root->fs_info->trans_mutex);
		now = get_seconds();
		if (now < cur->start_time || now - cur->start_time < 30) {
			mutex_unlock(&root->fs_info->trans_mutex);
		mutex_unlock(&root->fs_info->trans_mutex);
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_commit_transaction(trans, root);
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
		if (freezing(current)) {
			if (root->fs_info->closing)
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());
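/*
 * open_ctree() is the mount-time entry point for the metadata trees.  In
 * order it: allocates fs_info and the core roots, sets up the btree inode
 * and its bdi, reads the super block from the latest device, parses mount
 * options, starts the worker thread pools, reads the chunk tree (so
 * logical->physical mapping works), then reads the root, extent and device
 * trees and finally kicks off the cleaner and transaction kthreads.
 */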
struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices,
	struct buffer_head *bh;
	struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
	struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
	struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
	struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
	struct btrfs_super_block *disk_super;
	if (!extent_root || !tree_root || !fs_info) {
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
	spin_lock_init(&fs_info->hash_lock);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);
	spin_lock_init(&fs_info->ref_cache_lock);
	init_completion(&fs_info->kobj_unregister);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->throttles, 0);
	atomic_set(&fs_info->throttle_gen, 0);
	fs_info->max_extent = (u64)-1;
	fs_info->max_inline = 8192 * 1024;
	setup_bdi(fs_info, &fs_info->bdi);
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;
	fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
	INIT_LIST_HEAD(&fs_info->ordered_extents);
	spin_lock_init(&fs_info->ordered_extent_lock);
	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping,
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
	extent_io_tree_init(&fs_info->free_space_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->block_group_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pinned_extents,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pending_del,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->extent_ins,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->do_barriers = 1;
	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->drop_mutex);
	mutex_init(&fs_info->alloc_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	ret = add_hasher(fs_info, "crc32c");
		printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
	bh = __bread(fs_devices->latest_bdev,
		     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_sb_buffer;
	err = btrfs_parse_options(tree_root, options);
		goto fail_sb_buffer;
	 * we need to start all the end_io workers up front because the
	 * queue work function gets called at interrupt time, and so it
	 * cannot dynamically grow.
	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size));
	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	fs_info->submit_workers.idle_thresh = 64;
	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size);
	 * endios are largely parallel and should have a very
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_write_workers.idle_thresh = 4;
	btrfs_start_workers(&fs_info->workers, 1);
	btrfs_start_workers(&fs_info->submit_workers, 1);
	btrfs_start_workers(&fs_info->fixup_workers, 1);
	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
	btrfs_start_workers(&fs_info->endio_write_workers,
			    fs_info->thread_pool_size);
	if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
		printk("Btrfs: wanted %llu devices, but found %llu\n",
		       (unsigned long long)btrfs_super_num_devices(disk_super),
		       (unsigned long long)fs_devices->open_devices);
		if (btrfs_test_opt(tree_root, DEGRADED))
			printk("continuing in degraded mode\n");
			goto fail_sb_buffer;
	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;
	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
		printk("btrfs: failed to read the system array on %s\n",
		goto fail_sys_array;
	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));
	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
	BUG_ON(!chunk_root->node);
	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
			   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_chunk_tree(chunk_root);
	mutex_unlock(&fs_info->chunk_mutex);
	btrfs_close_extra_devices(fs_devices);
	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));
	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
	if (!tree_root->node)
		goto fail_sb_buffer;
	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
		goto fail_tree_root;
	extent_root->track_dirty = 1;
	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;
		goto fail_extent_root;
	btrfs_read_block_groups(extent_root);
	fs_info->generation = btrfs_super_generation(disk_super) + 1;
	fs_info->data_alloc_profile = (u64)-1;
	fs_info->metadata_alloc_profile = (u64)-1;
	fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
	if (!fs_info->cleaner_kthread)
		goto fail_extent_root;
	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   "btrfs-transaction");
	if (!fs_info->transaction_kthread)
	kthread_stop(fs_info->cleaner_kthread);
	free_extent_buffer(extent_root->node);
	free_extent_buffer(tree_root->node);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);
	iput(fs_info->btree_inode);
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
	return ERR_PTR(err);
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
	char b[BDEVNAME_SIZE];
		set_buffer_uptodate(bh);
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			printk(KERN_WARNING "lost page write due to "
			       "I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		clear_buffer_uptodate(bh);
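/*
 * Write the super block to every device in the filesystem.  Each copy is
 * stamped with that device's dev_item and a fresh checksum, then submitted
 * as a barrier write when the device still claims barrier support (falling
 * back to a plain write on -EOPNOTSUPP).  The second loop waits for all the
 * buffers and counts failures against max_errors, which is one less than
 * the device count recorded in the super block.
 */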
int write_all_supers(struct btrfs_root *root)
	struct list_head *cur;
	struct list_head *head = &root->fs_info->fs_devices->devices;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	struct buffer_head *bh;
	int total_errors = 0;
	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	sb = &root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;
	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->in_fs_metadata)
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
		crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);
		bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
			      BTRFS_SUPER_INFO_SIZE);
		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
		dev->pending_io = bh;
		set_buffer_uptodate(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;
		if (do_barriers && dev->barriers) {
			ret = submit_bh(WRITE_BARRIER, bh);
			if (ret == -EOPNOTSUPP) {
				printk("btrfs: disabling barriers on dev %s\n",
				set_buffer_uptodate(bh);
				ret = submit_bh(WRITE, bh);
			ret = submit_bh(WRITE, bh);
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (!dev->in_fs_metadata)
		BUG_ON(!dev->pending_io);
		bh = dev->pending_io;
		if (!buffer_uptodate(dev->pending_io)) {
			if (do_barriers && dev->barriers) {
				printk("btrfs: disabling barriers on dev %s\n",
				set_buffer_uptodate(bh);
				ret = submit_bh(WRITE, bh);
			if (!buffer_uptodate(bh))
		dev->pending_io = NULL;
	if (total_errors > max_errors) {
		printk("btrfs: %d errors while writing supers\n", total_errors);
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
	ret = write_all_supers(root);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	btrfs_sysfs_del_root(root);
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
static int del_fs_roots(struct btrfs_fs_info *fs_info)
	struct btrfs_root *gang[8];
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
int close_ctree(struct btrfs_root *root)
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;
	fs_info->closing = 1;
	kthread_stop(root->fs_info->transaction_kthread);
	kthread_stop(root->fs_info->cleaner_kthread);
	btrfs_clean_old_snapshots(root);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	write_ctree_super(NULL, root);
	if (fs_info->delalloc_bytes) {
		printk("btrfs: at unmount delalloc count %Lu\n",
		       fs_info->delalloc_bytes);
	if (fs_info->total_ref_cache_size) {
		printk("btrfs: at umount reference cache size %Lu\n",
		       fs_info->total_ref_cache_size);
	if (fs_info->extent_root->node)
		free_extent_buffer(fs_info->extent_root->node);
	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);
	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);
	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);
	btrfs_free_block_groups(root->fs_info);
	fs_info->closing = 2;
	del_fs_roots(fs_info);
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->submit_workers);
	iput(fs_info->btree_inode);
	while (!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
		list_del(&hasher->list);
		crypto_free_hash(hasher->hash_tfm);
	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
	bdi_destroy(&fs_info->bdi);
	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
	struct inode *btree_inode = buf->first_page->mapping->host;
	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
	struct inode *btree_inode = buf->first_page->mapping->host;
	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;
	WARN_ON(!btrfs_tree_locked(buf));
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
		       (unsigned long long)buf->start,
		       (unsigned long long)transid,
		       (unsigned long long)root->fs_info->generation);
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	struct extent_io_tree *tree;
	unsigned long thresh = 2 * 1024 * 1024;
	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	if (current_is_pdflush())
	num_dirty = count_range_bits(tree, &start, (u64)-1,
				     thresh, EXTENT_DIRTY);
	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				root->fs_info->btree_inode->i_mapping, 1);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
		buf->flags |= EXTENT_UPTODATE;
static struct extent_io_ops btree_extent_io_ops = {
	.writepage_io_hook = btree_writepage_io_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,