2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/version.h>
21 #include <linux/blkdev.h>
22 #include <linux/scatterlist.h>
23 #include <linux/swap.h>
24 #include <linux/radix-tree.h>
25 #include <linux/writeback.h>
26 #include <linux/buffer_head.h> // for block_sync_page
27 #include <linux/workqueue.h>
28 #include <linux/kthread.h>
29 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
30 # include <linux/freezer.h>
32 # include <linux/sched.h>
37 #include "transaction.h"
38 #include "btrfs_inode.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
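/*
 * sanity check: the block number stored in the extent buffer must match
 * the block number recorded in the tree block header
 */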
45 static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
47 if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
48 printk(KERN_CRIT "buf blocknr %llu, header blocknr %llu\n",
49 (unsigned long long)extent_buffer_blocknr(buf),
50 (unsigned long long)btrfs_header_blocknr(buf));
57 static struct extent_io_ops btree_extent_io_ops;
58 static void end_workqueue_fn(struct btrfs_work *work);
64 struct btrfs_fs_info *info;
67 struct list_head list;
68 struct btrfs_work work;
71 struct async_submit_bio {
74 struct list_head list;
75 extent_submit_bio_hook_t *submit_bio_hook;
78 struct btrfs_work work;
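/*
 * extent_map lookup for the btree inode: metadata maps 1:1 onto the latest
 * bdev, so this either finds the cached mapping or inserts a new one
 * covering the requested range
 */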
81 struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
82 size_t page_offset, u64 start, u64 len,
85 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
86 struct extent_map *em;
89 spin_lock(&em_tree->lock);
90 em = lookup_extent_mapping(em_tree, start, len);
93 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
94 spin_unlock(&em_tree->lock);
97 spin_unlock(&em_tree->lock);
99 em = alloc_extent_map(GFP_NOFS);
101 em = ERR_PTR(-ENOMEM);
107 em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
109 spin_lock(&em_tree->lock);
110 ret = add_extent_mapping(em_tree, em);
111 if (ret == -EEXIST) {
112 u64 failed_start = em->start;
113 u64 failed_len = em->len;
115 printk("failed to insert %Lu %Lu -> %Lu into tree\n",
116 em->start, em->len, em->block_start);
118 em = lookup_extent_mapping(em_tree, start, len);
120 printk("after failing, found %Lu %Lu %Lu\n",
121 em->start, em->len, em->block_start);
124 em = lookup_extent_mapping(em_tree, failed_start,
127 printk("double failure lookup gives us "
128 "%Lu %Lu -> %Lu\n", em->start,
129 em->len, em->block_start);
138 spin_unlock(&em_tree->lock);
146 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
148 return btrfs_crc32c(seed, data, len);
151 void btrfs_csum_final(u32 crc, char *result)
153 *(__le32 *)result = ~cpu_to_le32(crc);
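/*
 * compute the crc32c of everything past the csum field of a tree block;
 * with verify == 0 the result is written into the block, with verify == 1
 * it is compared against the stored checksum
 */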
156 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
159 char result[BTRFS_CRC32_SIZE];
161 unsigned long cur_len;
162 unsigned long offset = BTRFS_CSUM_SIZE;
163 char *map_token = NULL;
165 unsigned long map_start;
166 unsigned long map_len;
170 len = buf->len - offset;
172 err = map_private_extent_buffer(buf, offset, 32,
174 &map_start, &map_len, KM_USER0);
176 printk("failed to map extent buffer! %lu\n",
180 cur_len = min(len, map_len - (offset - map_start));
181 crc = btrfs_csum_data(root, kaddr + offset - map_start,
185 unmap_extent_buffer(buf, map_token, KM_USER0);
187 btrfs_csum_final(crc, result);
190 int from_this_trans = 0;
192 if (root->fs_info->running_transaction &&
193 btrfs_header_generation(buf) ==
194 root->fs_info->running_transaction->transid)
197 /* FIXME, this is not good */
198 if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
201 memcpy(&found, result, BTRFS_CRC32_SIZE);
203 read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
204 printk("btrfs: %s checksum verify failed on %llu "
205 "wanted %X found %X from_this_trans %d "
207 root->fs_info->sb->s_id,
208 buf->start, val, found, from_this_trans,
209 btrfs_header_level(buf));
213 write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
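/*
 * make sure the generation recorded in a block matches the transid the
 * parent node expected; on mismatch the buffer is marked not uptodate
 */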
218 static int verify_parent_transid(struct extent_io_tree *io_tree,
219 struct extent_buffer *eb, u64 parent_transid)
223 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
226 lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
227 if (extent_buffer_uptodate(io_tree, eb) &&
228 btrfs_header_generation(eb) == parent_transid) {
232 printk("parent transid verify failed on %llu wanted %llu found %llu\n",
233 (unsigned long long)eb->start,
234 (unsigned long long)parent_transid,
235 (unsigned long long)btrfs_header_generation(eb));
238 clear_extent_buffer_uptodate(io_tree, eb);
239 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
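/*
 * read an extent buffer, retrying with the other mirrors until the
 * checksum and parent transid both verify or we run out of copies
 */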
245 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
246 struct extent_buffer *eb,
247 u64 start, u64 parent_transid)
249 struct extent_io_tree *io_tree;
254 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
256 ret = read_extent_buffer_pages(io_tree, eb, start, 1,
257 btree_get_extent, mirror_num);
259 !verify_parent_transid(io_tree, eb, parent_transid))
262 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
268 if (mirror_num > num_copies)
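/*
 * checksum a dirty tree block before IO: find the extent buffer that owns
 * this page, mark the header written and recompute the csum
 */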
274 int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
276 struct extent_io_tree *tree;
277 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
281 struct extent_buffer *eb;
284 tree = &BTRFS_I(page->mapping->host)->io_tree;
286 if (page->private == EXTENT_PAGE_PRIVATE)
290 len = page->private >> 2;
294 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
295 ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
296 btrfs_header_generation(eb));
298 btrfs_clear_buffer_defrag(eb);
299 found_start = btrfs_header_bytenr(eb);
300 if (found_start != start) {
301 printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
302 start, found_start, len);
306 if (eb->first_page != page) {
307 printk("bad first page %lu %lu\n", eb->first_page->index,
312 if (!PageUptodate(page)) {
313 printk("csum not up to date page %lu\n", page->index);
317 found_level = btrfs_header_level(eb);
318 spin_lock(&root->fs_info->hash_lock);
319 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
320 spin_unlock(&root->fs_info->hash_lock);
321 csum_tree_block(root, eb, 0);
323 free_extent_buffer(eb);
328 static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
330 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
332 csum_dirty_buffer(root, page);
336 int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
337 struct extent_state *state)
339 struct extent_io_tree *tree;
343 struct extent_buffer *eb;
344 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
347 tree = &BTRFS_I(page->mapping->host)->io_tree;
348 if (page->private == EXTENT_PAGE_PRIVATE)
352 len = page->private >> 2;
356 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
358 btrfs_clear_buffer_defrag(eb);
359 found_start = btrfs_header_bytenr(eb);
360 if (found_start != start) {
364 if (eb->first_page != page) {
365 printk("bad first page %lu %lu\n", eb->first_page->index,
371 if (memcmp_extent_buffer(eb, root->fs_info->fsid,
372 (unsigned long)btrfs_header_fsid(eb),
374 printk("bad fsid on block %Lu\n", eb->start);
378 found_level = btrfs_header_level(eb);
380 ret = csum_tree_block(root, eb, 1);
384 end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
385 end = eb->start + end - 1;
386 release_extent_buffer_tail_pages(eb);
388 free_extent_buffer(eb);
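/*
 * bio end_io callback: defer the real completion work to the endio worker
 * threads so checksum verification happens outside interrupt context
 */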
393 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
394 static void end_workqueue_bio(struct bio *bio, int err)
396 static int end_workqueue_bio(struct bio *bio,
397 unsigned int bytes_done, int err)
400 struct end_io_wq *end_io_wq = bio->bi_private;
401 struct btrfs_fs_info *fs_info;
403 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
408 fs_info = end_io_wq->info;
409 end_io_wq->error = err;
410 end_io_wq->work.func = end_workqueue_fn;
411 end_io_wq->work.flags = 0;
412 btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
414 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
419 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
422 struct end_io_wq *end_io_wq;
423 end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
427 end_io_wq->private = bio->bi_private;
428 end_io_wq->end_io = bio->bi_end_io;
429 end_io_wq->info = info;
430 end_io_wq->error = 0;
431 end_io_wq->bio = bio;
432 end_io_wq->metadata = metadata;
434 bio->bi_private = end_io_wq;
435 bio->bi_end_io = end_workqueue_bio;
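/*
 * worker thread function: pull an async_submit_bio off the queue and call
 * the deferred submit_bio_hook for it
 */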
439 static void run_one_async_submit(struct btrfs_work *work)
441 struct btrfs_fs_info *fs_info;
442 struct async_submit_bio *async;
444 async = container_of(work, struct async_submit_bio, work);
445 fs_info = BTRFS_I(async->inode)->root->fs_info;
446 atomic_dec(&fs_info->nr_async_submits);
447 async->submit_bio_hook(async->inode, async->rw, async->bio,
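/*
 * queue a bio for submission by the worker threads instead of submitting
 * it directly, so checksumming does not block the caller
 */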
452 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
453 int rw, struct bio *bio, int mirror_num,
454 extent_submit_bio_hook_t *submit_bio_hook)
456 struct async_submit_bio *async;
458 async = kmalloc(sizeof(*async), GFP_NOFS);
462 async->inode = inode;
465 async->mirror_num = mirror_num;
466 async->submit_bio_hook = submit_bio_hook;
467 async->work.func = run_one_async_submit;
468 async->work.flags = 0;
469 atomic_inc(&fs_info->nr_async_submits);
470 btrfs_queue_worker(&fs_info->workers, &async->work);
474 static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
477 struct btrfs_root *root = BTRFS_I(inode)->root;
481 offset = bio->bi_sector << 9;
484 * when we're called for a write, we're already in the async
485 * submission context. Just jump into btrfs_map_bio
487 if (rw & (1 << BIO_RW)) {
488 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
493 * called for a read, do the setup so that checksum validation
494 * can happen in the async kernel threads
496 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
499 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
502 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
506 * kthread helpers are used to submit writes so that checksumming
507 * can happen in parallel across all CPUs
509 if (!(rw & (1 << BIO_RW))) {
510 return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
512 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
513 inode, rw, bio, mirror_num,
514 __btree_submit_bio_hook);
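/* write out a single dirty btree page through the extent_io code */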
517 static int btree_writepage(struct page *page, struct writeback_control *wbc)
519 struct extent_io_tree *tree;
520 tree = &BTRFS_I(page->mapping->host)->io_tree;
521 return extent_write_full_page(tree, page, btree_get_extent, wbc);
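/*
 * writepages for the btree inode; for non-sync writeback, skip the work
 * unless enough dirty metadata has accumulated
 */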
524 static int btree_writepages(struct address_space *mapping,
525 struct writeback_control *wbc)
527 struct extent_io_tree *tree;
528 tree = &BTRFS_I(mapping->host)->io_tree;
529 if (wbc->sync_mode == WB_SYNC_NONE) {
532 unsigned long thresh = 96 * 1024 * 1024;
534 if (wbc->for_kupdate)
537 if (current_is_pdflush()) {
538 thresh = 96 * 1024 * 1024;
540 thresh = 8 * 1024 * 1024;
542 num_dirty = count_range_bits(tree, &start, (u64)-1,
543 thresh, EXTENT_DIRTY);
544 if (num_dirty < thresh) {
548 return extent_writepages(tree, mapping, btree_get_extent, wbc);
551 int btree_readpage(struct file *file, struct page *page)
553 struct extent_io_tree *tree;
554 tree = &BTRFS_I(page->mapping->host)->io_tree;
555 return extent_read_full_page(tree, page, btree_get_extent);
558 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
560 struct extent_io_tree *tree;
561 struct extent_map_tree *map;
564 if (page_count(page) > 3) {
565 /* once for page->private, once for the caller, once
566 * for the page cache
570 tree = &BTRFS_I(page->mapping->host)->io_tree;
571 map = &BTRFS_I(page->mapping->host)->extent_tree;
572 ret = try_release_extent_state(map, tree, page, gfp_flags);
574 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
575 ClearPagePrivate(page);
576 set_page_private(page, 0);
577 page_cache_release(page);
582 static void btree_invalidatepage(struct page *page, unsigned long offset)
584 struct extent_io_tree *tree;
585 tree = &BTRFS_I(page->mapping->host)->io_tree;
586 extent_invalidatepage(tree, page, offset);
587 btree_releasepage(page, GFP_NOFS);
588 if (PagePrivate(page)) {
589 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
590 ClearPagePrivate(page);
591 set_page_private(page, 0);
592 page_cache_release(page);
597 static int btree_writepage(struct page *page, struct writeback_control *wbc)
599 struct buffer_head *bh;
600 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
601 struct buffer_head *head;
602 if (!page_has_buffers(page)) {
603 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
604 (1 << BH_Dirty)|(1 << BH_Uptodate));
606 head = page_buffers(page);
609 if (buffer_dirty(bh))
610 csum_tree_block(root, bh, 0);
611 bh = bh->b_this_page;
612 } while (bh != head);
613 return block_write_full_page(page, btree_get_block, wbc);
617 static struct address_space_operations btree_aops = {
618 .readpage = btree_readpage,
619 .writepage = btree_writepage,
620 .writepages = btree_writepages,
621 .releasepage = btree_releasepage,
622 .invalidatepage = btree_invalidatepage,
623 .sync_page = block_sync_page,
626 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
629 struct extent_buffer *buf = NULL;
630 struct inode *btree_inode = root->fs_info->btree_inode;
633 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
636 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
637 buf, 0, 0, btree_get_extent, 0);
638 free_extent_buffer(buf);
642 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
643 u64 bytenr, u32 blocksize)
645 struct inode *btree_inode = root->fs_info->btree_inode;
646 struct extent_buffer *eb;
647 eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
648 bytenr, blocksize, GFP_NOFS);
652 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
653 u64 bytenr, u32 blocksize)
655 struct inode *btree_inode = root->fs_info->btree_inode;
656 struct extent_buffer *eb;
658 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
659 bytenr, blocksize, NULL, GFP_NOFS);
664 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
665 u32 blocksize, u64 parent_transid)
667 struct extent_buffer *buf = NULL;
668 struct inode *btree_inode = root->fs_info->btree_inode;
669 struct extent_io_tree *io_tree;
672 io_tree = &BTRFS_I(btree_inode)->io_tree;
674 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
678 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
681 buf->flags |= EXTENT_UPTODATE;
687 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
688 struct extent_buffer *buf)
690 struct inode *btree_inode = root->fs_info->btree_inode;
691 if (btrfs_header_generation(buf) ==
692 root->fs_info->running_transaction->transid) {
693 WARN_ON(!btrfs_tree_locked(buf));
694 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
700 int wait_on_tree_block_writeback(struct btrfs_root *root,
701 struct extent_buffer *buf)
703 struct inode *btree_inode = root->fs_info->btree_inode;
704 wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
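/* initialize the in-memory fields of a btrfs_root */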
709 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
710 u32 stripesize, struct btrfs_root *root,
711 struct btrfs_fs_info *fs_info,
716 root->commit_root = NULL;
717 root->sectorsize = sectorsize;
718 root->nodesize = nodesize;
719 root->leafsize = leafsize;
720 root->stripesize = stripesize;
722 root->track_dirty = 0;
724 root->fs_info = fs_info;
725 root->objectid = objectid;
726 root->last_trans = 0;
727 root->highest_inode = 0;
728 root->last_inode_alloc = 0;
732 INIT_LIST_HEAD(&root->dirty_list);
733 spin_lock_init(&root->node_lock);
734 mutex_init(&root->objectid_mutex);
735 memset(&root->root_key, 0, sizeof(root->root_key));
736 memset(&root->root_item, 0, sizeof(root->root_item));
737 memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
738 memset(&root->root_kobj, 0, sizeof(root->root_kobj));
739 init_completion(&root->kobj_unregister);
740 root->defrag_running = 0;
741 root->defrag_level = 0;
742 root->root_key.objectid = objectid;
746 static int find_and_setup_root(struct btrfs_root *tree_root,
747 struct btrfs_fs_info *fs_info,
749 struct btrfs_root *root)
754 __setup_root(tree_root->nodesize, tree_root->leafsize,
755 tree_root->sectorsize, tree_root->stripesize,
756 root, fs_info, objectid);
757 ret = btrfs_find_last_root(tree_root, objectid,
758 &root->root_item, &root->root_key);
761 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
762 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
768 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
769 struct btrfs_key *location)
771 struct btrfs_root *root;
772 struct btrfs_root *tree_root = fs_info->tree_root;
773 struct btrfs_path *path;
774 struct extent_buffer *l;
779 root = kzalloc(sizeof(*root), GFP_NOFS);
781 return ERR_PTR(-ENOMEM);
782 if (location->offset == (u64)-1) {
783 ret = find_and_setup_root(tree_root, fs_info,
784 location->objectid, root);
792 __setup_root(tree_root->nodesize, tree_root->leafsize,
793 tree_root->sectorsize, tree_root->stripesize,
794 root, fs_info, location->objectid);
796 path = btrfs_alloc_path();
798 ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
805 read_extent_buffer(l, &root->root_item,
806 btrfs_item_ptr_offset(l, path->slots[0]),
807 sizeof(root->root_item));
808 memcpy(&root->root_key, location, sizeof(*location));
811 btrfs_release_path(root, path);
812 btrfs_free_path(path);
817 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
818 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
823 ret = btrfs_find_highest_inode(root, &highest_inode);
825 root->highest_inode = highest_inode;
826 root->last_inode_alloc = highest_inode;
831 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
834 struct btrfs_root *root;
836 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
837 return fs_info->tree_root;
838 if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
839 return fs_info->extent_root;
841 root = radix_tree_lookup(&fs_info->fs_roots_radix,
842 (unsigned long)root_objectid);
846 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
847 struct btrfs_key *location)
849 struct btrfs_root *root;
852 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
853 return fs_info->tree_root;
854 if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
855 return fs_info->extent_root;
856 if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
857 return fs_info->chunk_root;
858 if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
859 return fs_info->dev_root;
861 root = radix_tree_lookup(&fs_info->fs_roots_radix,
862 (unsigned long)location->objectid);
866 root = btrfs_read_fs_root_no_radix(fs_info, location);
869 ret = radix_tree_insert(&fs_info->fs_roots_radix,
870 (unsigned long)root->root_key.objectid,
873 free_extent_buffer(root->node);
877 ret = btrfs_find_dead_roots(fs_info->tree_root,
878 root->root_key.objectid, root);
884 struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
885 struct btrfs_key *location,
886 const char *name, int namelen)
888 struct btrfs_root *root;
891 root = btrfs_read_fs_root_no_name(fs_info, location);
898 ret = btrfs_set_root_name(root, name, namelen);
900 free_extent_buffer(root->node);
905 ret = btrfs_sysfs_add_root(root);
907 free_extent_buffer(root->node);
916 static int add_hasher(struct btrfs_fs_info *info, char *type) {
917 struct btrfs_hasher *hasher;
919 hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
922 hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
923 if (!hasher->hash_tfm) {
927 spin_lock(&info->hash_lock);
928 list_add(&hasher->list, &info->hashers);
929 spin_unlock(&info->hash_lock);
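/*
 * backing_dev congestion callback: report congestion when the async submit
 * backlog is too deep or when any underlying device is congested
 */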
934 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
936 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
938 int limit = 256 * info->fs_devices->open_devices;
939 struct list_head *cur;
940 struct btrfs_device *device;
941 struct backing_dev_info *bdi;
943 if ((bdi_bits & (1 << BDI_write_congested)) &&
944 atomic_read(&info->nr_async_submits) > limit) {
948 list_for_each(cur, &info->fs_devices->devices) {
949 device = list_entry(cur, struct btrfs_device, dev_list);
952 bdi = blk_get_backing_dev_info(device->bdev);
953 if (bdi && bdi_congested(bdi, bdi_bits)) {
962 * this unplugs every device on the box, and it is only used when page
965 static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
967 struct list_head *cur;
968 struct btrfs_device *device;
969 struct btrfs_fs_info *info;
971 info = (struct btrfs_fs_info *)bdi->unplug_io_data;
972 list_for_each(cur, &info->fs_devices->devices) {
973 device = list_entry(cur, struct btrfs_device, dev_list);
974 bdi = blk_get_backing_dev_info(device->bdev);
975 if (bdi->unplug_io_fn) {
976 bdi->unplug_io_fn(bdi, page);
981 void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
984 struct extent_map_tree *em_tree;
985 struct extent_map *em;
986 struct address_space *mapping;
989 /* the generic O_DIRECT read code does this */
991 __unplug_io_fn(bdi, page);
996 * page->mapping may change at any time. Get a consistent copy
997 * and use that for everything below
1000 mapping = page->mapping;
1004 inode = mapping->host;
1005 offset = page_offset(page);
1007 em_tree = &BTRFS_I(inode)->extent_tree;
1008 spin_lock(&em_tree->lock);
1009 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1010 spin_unlock(&em_tree->lock);
1014 offset = offset - em->start;
1015 btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
1016 em->block_start + offset, page);
1017 free_extent_map(em);
1020 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1022 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1025 bdi->ra_pages = default_backing_dev_info.ra_pages;
1027 bdi->capabilities = default_backing_dev_info.capabilities;
1028 bdi->unplug_io_fn = btrfs_unplug_io_fn;
1029 bdi->unplug_io_data = info;
1030 bdi->congested_fn = btrfs_congested_fn;
1031 bdi->congested_data = info;
1035 static int bio_ready_for_csum(struct bio *bio)
1041 struct extent_io_tree *io_tree = NULL;
1042 struct btrfs_fs_info *info = NULL;
1043 struct bio_vec *bvec;
1047 bio_for_each_segment(bvec, bio, i) {
1048 page = bvec->bv_page;
1049 if (page->private == EXTENT_PAGE_PRIVATE) {
1050 length += bvec->bv_len;
1053 if (!page->private) {
1054 length += bvec->bv_len;
1057 length = bvec->bv_len;
1058 buf_len = page->private >> 2;
1059 start = page_offset(page) + bvec->bv_offset;
1060 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1061 info = BTRFS_I(page->mapping->host)->root->fs_info;
1063 /* are we fully contained in this bio? */
1064 if (buf_len <= length)
1067 ret = extent_range_uptodate(io_tree, start + length,
1068 start + buf_len - 1);
1075 * called by the kthread helper functions to finally call the bio end_io
1076 * functions. This is where read checksum verification actually happens
1078 static void end_workqueue_fn(struct btrfs_work *work)
1081 struct end_io_wq *end_io_wq;
1082 struct btrfs_fs_info *fs_info;
1085 end_io_wq = container_of(work, struct end_io_wq, work);
1086 bio = end_io_wq->bio;
1087 fs_info = end_io_wq->info;
1089 /* metadata bios are special because the whole tree block must
1090 * be checksummed at once. This makes sure the entire block is in
1091 * ram and up to date before trying to verify things. For
1092 * blocksize <= pagesize, it is basically a noop
1094 if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
1095 btrfs_queue_worker(&fs_info->endio_workers,
1099 error = end_io_wq->error;
1100 bio->bi_private = end_io_wq->private;
1101 bio->bi_end_io = end_io_wq->end_io;
1103 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1104 bio_endio(bio, bio->bi_size, error);
1106 bio_endio(bio, error);
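/*
 * background thread that periodically runs btrfs_clean_old_snapshots
 * under the cleaner_mutex until the filesystem is closed
 */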
1110 static int cleaner_kthread(void *arg)
1112 struct btrfs_root *root = arg;
1116 if (root->fs_info->closing)
1119 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1120 mutex_lock(&root->fs_info->cleaner_mutex);
1121 printk("cleaner awake\n");
1122 btrfs_clean_old_snapshots(root);
1123 printk("cleaner done\n");
1124 mutex_unlock(&root->fs_info->cleaner_mutex);
1126 if (freezing(current)) {
1130 if (root->fs_info->closing)
1132 set_current_state(TASK_INTERRUPTIBLE);
1134 __set_current_state(TASK_RUNNING);
1136 } while (!kthread_should_stop());
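/*
 * background thread that commits the running transaction once it is
 * roughly 30 seconds old, then wakes the cleaner
 */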
1140 static int transaction_kthread(void *arg)
1142 struct btrfs_root *root = arg;
1143 struct btrfs_trans_handle *trans;
1144 struct btrfs_transaction *cur;
1146 unsigned long delay;
1151 if (root->fs_info->closing)
1155 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1156 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1158 mutex_lock(&root->fs_info->trans_mutex);
1159 cur = root->fs_info->running_transaction;
1161 mutex_unlock(&root->fs_info->trans_mutex);
1164 now = get_seconds();
1165 if (now < cur->start_time || now - cur->start_time < 30) {
1166 mutex_unlock(&root->fs_info->trans_mutex);
1170 mutex_unlock(&root->fs_info->trans_mutex);
1171 btrfs_defrag_dirty_roots(root->fs_info);
1172 trans = btrfs_start_transaction(root, 1);
1173 ret = btrfs_commit_transaction(trans, root);
1175 wake_up_process(root->fs_info->cleaner_kthread);
1176 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1178 if (freezing(current)) {
1181 if (root->fs_info->closing)
1183 set_current_state(TASK_INTERRUPTIBLE);
1184 schedule_timeout(delay);
1185 __set_current_state(TASK_RUNNING);
1187 } while (!kthread_should_stop());
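/*
 * mount time setup: read the super block, start the worker threads and
 * kthreads, and read the chunk, extent and device tree roots
 */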
1191 struct btrfs_root *open_ctree(struct super_block *sb,
1192 struct btrfs_fs_devices *fs_devices,
1200 struct buffer_head *bh;
1201 struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
1203 struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
1205 struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1207 struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
1209 struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
1214 struct btrfs_super_block *disk_super;
1216 if (!extent_root || !tree_root || !fs_info) {
1220 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
1221 INIT_LIST_HEAD(&fs_info->trans_list);
1222 INIT_LIST_HEAD(&fs_info->dead_roots);
1223 INIT_LIST_HEAD(&fs_info->hashers);
1224 spin_lock_init(&fs_info->hash_lock);
1225 spin_lock_init(&fs_info->delalloc_lock);
1226 spin_lock_init(&fs_info->new_trans_lock);
1228 init_completion(&fs_info->kobj_unregister);
1229 fs_info->tree_root = tree_root;
1230 fs_info->extent_root = extent_root;
1231 fs_info->chunk_root = chunk_root;
1232 fs_info->dev_root = dev_root;
1233 fs_info->fs_devices = fs_devices;
1234 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1235 INIT_LIST_HEAD(&fs_info->space_info);
1236 btrfs_mapping_init(&fs_info->mapping_tree);
1237 atomic_set(&fs_info->nr_async_submits, 0);
1238 atomic_set(&fs_info->throttles, 0);
1240 fs_info->max_extent = (u64)-1;
1241 fs_info->max_inline = 8192 * 1024;
1242 setup_bdi(fs_info, &fs_info->bdi);
1243 fs_info->btree_inode = new_inode(sb);
1244 fs_info->btree_inode->i_ino = 1;
1245 fs_info->btree_inode->i_nlink = 1;
1246 fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
1248 sb->s_blocksize = 4096;
1249 sb->s_blocksize_bits = blksize_bits(4096);
1252 * we set the i_size on the btree inode to the largest possible offset.
1253 * the real end of the address space is determined by all of
1254 * the devices in the system
1256 fs_info->btree_inode->i_size = OFFSET_MAX;
1257 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1258 fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1260 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1261 fs_info->btree_inode->i_mapping,
1263 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1266 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1268 extent_io_tree_init(&fs_info->free_space_cache,
1269 fs_info->btree_inode->i_mapping, GFP_NOFS);
1270 extent_io_tree_init(&fs_info->block_group_cache,
1271 fs_info->btree_inode->i_mapping, GFP_NOFS);
1272 extent_io_tree_init(&fs_info->pinned_extents,
1273 fs_info->btree_inode->i_mapping, GFP_NOFS);
1274 extent_io_tree_init(&fs_info->pending_del,
1275 fs_info->btree_inode->i_mapping, GFP_NOFS);
1276 extent_io_tree_init(&fs_info->extent_ins,
1277 fs_info->btree_inode->i_mapping, GFP_NOFS);
1278 fs_info->do_barriers = 1;
1280 BTRFS_I(fs_info->btree_inode)->root = tree_root;
1281 memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1282 sizeof(struct btrfs_key));
1283 insert_inode_hash(fs_info->btree_inode);
1284 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1286 mutex_init(&fs_info->trans_mutex);
1287 mutex_init(&fs_info->drop_mutex);
1288 mutex_init(&fs_info->alloc_mutex);
1289 mutex_init(&fs_info->chunk_mutex);
1290 mutex_init(&fs_info->transaction_kthread_mutex);
1291 mutex_init(&fs_info->cleaner_mutex);
1294 ret = add_hasher(fs_info, "crc32c");
1296 printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
1301 __setup_root(4096, 4096, 4096, 4096, tree_root,
1302 fs_info, BTRFS_ROOT_TREE_OBJECTID);
1305 bh = __bread(fs_devices->latest_bdev,
1306 BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
1310 memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1313 memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1315 disk_super = &fs_info->super_copy;
1316 if (!btrfs_super_root(disk_super))
1317 goto fail_sb_buffer;
1319 err = btrfs_parse_options(tree_root, options);
1321 goto fail_sb_buffer;
1324 * we need to start all the end_io workers up front because the
1325 * queue work function gets called at interrupt time, and so it
1326 * cannot dynamically grow.
1328 btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
1329 btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
1330 btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
1331 btrfs_start_workers(&fs_info->workers, 1);
1332 btrfs_start_workers(&fs_info->submit_workers, 1);
1333 btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
1336 if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
1337 printk("Btrfs: wanted %llu devices, but found %llu\n",
1338 (unsigned long long)btrfs_super_num_devices(disk_super),
1339 (unsigned long long)fs_devices->open_devices);
1340 if (btrfs_test_opt(tree_root, DEGRADED))
1341 printk("continuing in degraded mode\n");
1343 goto fail_sb_buffer;
1347 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1349 nodesize = btrfs_super_nodesize(disk_super);
1350 leafsize = btrfs_super_leafsize(disk_super);
1351 sectorsize = btrfs_super_sectorsize(disk_super);
1352 stripesize = btrfs_super_stripesize(disk_super);
1353 tree_root->nodesize = nodesize;
1354 tree_root->leafsize = leafsize;
1355 tree_root->sectorsize = sectorsize;
1356 tree_root->stripesize = stripesize;
1358 sb->s_blocksize = sectorsize;
1359 sb->s_blocksize_bits = blksize_bits(sectorsize);
1361 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1362 sizeof(disk_super->magic))) {
1363 printk("btrfs: valid FS not found on %s\n", sb->s_id);
1364 goto fail_sb_buffer;
1367 mutex_lock(&fs_info->chunk_mutex);
1368 ret = btrfs_read_sys_array(tree_root);
1369 mutex_unlock(&fs_info->chunk_mutex);
1371 printk("btrfs: failed to read the system array on %s\n",
1373 goto fail_sys_array;
1376 blocksize = btrfs_level_size(tree_root,
1377 btrfs_super_chunk_root_level(disk_super));
1379 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1380 chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1382 chunk_root->node = read_tree_block(chunk_root,
1383 btrfs_super_chunk_root(disk_super),
1385 BUG_ON(!chunk_root->node);
1387 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1388 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1391 mutex_lock(&fs_info->chunk_mutex);
1392 ret = btrfs_read_chunk_tree(chunk_root);
1393 mutex_unlock(&fs_info->chunk_mutex);
1396 btrfs_close_extra_devices(fs_devices);
1398 blocksize = btrfs_level_size(tree_root,
1399 btrfs_super_root_level(disk_super));
1402 tree_root->node = read_tree_block(tree_root,
1403 btrfs_super_root(disk_super),
1405 if (!tree_root->node)
1406 goto fail_sb_buffer;
1409 ret = find_and_setup_root(tree_root, fs_info,
1410 BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1412 goto fail_tree_root;
1413 extent_root->track_dirty = 1;
1415 ret = find_and_setup_root(tree_root, fs_info,
1416 BTRFS_DEV_TREE_OBJECTID, dev_root);
1417 dev_root->track_dirty = 1;
1420 goto fail_extent_root;
1422 btrfs_read_block_groups(extent_root);
1424 fs_info->generation = btrfs_super_generation(disk_super) + 1;
1425 fs_info->data_alloc_profile = (u64)-1;
1426 fs_info->metadata_alloc_profile = (u64)-1;
1427 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1428 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1430 if (!fs_info->cleaner_kthread)
1431 goto fail_extent_root;
1433 fs_info->transaction_kthread = kthread_run(transaction_kthread,
1435 "btrfs-transaction");
1436 if (!fs_info->transaction_kthread)
1437 goto fail_trans_kthread;
1443 kthread_stop(fs_info->cleaner_kthread);
1445 free_extent_buffer(extent_root->node);
1447 free_extent_buffer(tree_root->node);
1450 extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
1451 btrfs_stop_workers(&fs_info->workers);
1452 btrfs_stop_workers(&fs_info->endio_workers);
1453 btrfs_stop_workers(&fs_info->submit_workers);
1455 iput(fs_info->btree_inode);
1457 btrfs_close_devices(fs_info->fs_devices);
1458 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1462 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1463 bdi_destroy(&fs_info->bdi);
1466 return ERR_PTR(err);
1469 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1471 char b[BDEVNAME_SIZE];
1474 set_buffer_uptodate(bh);
1476 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
1477 printk(KERN_WARNING "lost page write due to "
1478 "I/O error on %s\n",
1479 bdevname(bh->b_bdev, b));
1481 /* note, we don't set_buffer_write_io_error because we have
1482 * our own ways of dealing with the IO errors
1484 clear_buffer_uptodate(bh);
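/*
 * copy the super block to every device, checksum each copy and submit the
 * writes (with barriers when possible), then wait and count IO errors
 */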
1490 int write_all_supers(struct btrfs_root *root)
1492 struct list_head *cur;
1493 struct list_head *head = &root->fs_info->fs_devices->devices;
1494 struct btrfs_device *dev;
1495 struct btrfs_super_block *sb;
1496 struct btrfs_dev_item *dev_item;
1497 struct buffer_head *bh;
1501 int total_errors = 0;
1505 max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1506 do_barriers = !btrfs_test_opt(root, NOBARRIER);
1508 sb = &root->fs_info->super_for_commit;
1509 dev_item = &sb->dev_item;
1510 list_for_each(cur, head) {
1511 dev = list_entry(cur, struct btrfs_device, dev_list);
1516 if (!dev->in_fs_metadata)
1519 btrfs_set_stack_device_type(dev_item, dev->type);
1520 btrfs_set_stack_device_id(dev_item, dev->devid);
1521 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
1522 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
1523 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
1524 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
1525 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
1526 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
1527 flags = btrfs_super_flags(sb);
1528 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
1532 crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
1533 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
1534 btrfs_csum_final(crc, sb->csum);
1536 bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
1537 BTRFS_SUPER_INFO_SIZE);
1539 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
1540 dev->pending_io = bh;
1543 set_buffer_uptodate(bh);
1545 bh->b_end_io = btrfs_end_buffer_write_sync;
1547 if (do_barriers && dev->barriers) {
1548 ret = submit_bh(WRITE_BARRIER, bh);
1549 if (ret == -EOPNOTSUPP) {
1550 printk("btrfs: disabling barriers on dev %s\n",
1552 set_buffer_uptodate(bh);
1556 ret = submit_bh(WRITE, bh);
1559 ret = submit_bh(WRITE, bh);
1564 if (total_errors > max_errors) {
1565 printk("btrfs: %d errors while writing supers\n", total_errors);
1570 list_for_each(cur, head) {
1571 dev = list_entry(cur, struct btrfs_device, dev_list);
1574 if (!dev->in_fs_metadata)
1577 BUG_ON(!dev->pending_io);
1578 bh = dev->pending_io;
1580 if (!buffer_uptodate(dev->pending_io)) {
1581 if (do_barriers && dev->barriers) {
1582 printk("btrfs: disabling barriers on dev %s\n",
1584 set_buffer_uptodate(bh);
1588 ret = submit_bh(WRITE, bh);
1591 if (!buffer_uptodate(bh))
1598 dev->pending_io = NULL;
1601 if (total_errors > max_errors) {
1602 printk("btrfs: %d errors while writing supers\n", total_errors);
1608 int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
1613 ret = write_all_supers(root);
1617 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
1619 radix_tree_delete(&fs_info->fs_roots_radix,
1620 (unsigned long)root->root_key.objectid);
1622 btrfs_sysfs_del_root(root);
1626 free_extent_buffer(root->node);
1627 if (root->commit_root)
1628 free_extent_buffer(root->commit_root);
1635 static int del_fs_roots(struct btrfs_fs_info *fs_info)
1638 struct btrfs_root *gang[8];
1642 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1647 for (i = 0; i < ret; i++)
1648 btrfs_free_fs_root(fs_info, gang[i]);
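/*
 * unmount path: stop the kthreads, commit the final transactions, write
 * the super blocks and tear down all of the in-memory state
 */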
1653 int close_ctree(struct btrfs_root *root)
1656 struct btrfs_trans_handle *trans;
1657 struct btrfs_fs_info *fs_info = root->fs_info;
1659 fs_info->closing = 1;
1662 kthread_stop(root->fs_info->transaction_kthread);
1663 kthread_stop(root->fs_info->cleaner_kthread);
1665 btrfs_defrag_dirty_roots(root->fs_info);
1666 btrfs_clean_old_snapshots(root);
1667 trans = btrfs_start_transaction(root, 1);
1668 ret = btrfs_commit_transaction(trans, root);
1669 /* run commit again to drop the original snapshot */
1670 trans = btrfs_start_transaction(root, 1);
1671 btrfs_commit_transaction(trans, root);
1672 ret = btrfs_write_and_wait_transaction(NULL, root);
1675 write_ctree_super(NULL, root);
1677 if (fs_info->delalloc_bytes) {
1678 printk("btrfs: at unmount delalloc count %Lu\n",
1679 fs_info->delalloc_bytes);
1681 if (fs_info->extent_root->node)
1682 free_extent_buffer(fs_info->extent_root->node);
1684 if (fs_info->tree_root->node)
1685 free_extent_buffer(fs_info->tree_root->node);
1687 if (root->fs_info->chunk_root->node)
1688 free_extent_buffer(root->fs_info->chunk_root->node);
1690 if (root->fs_info->dev_root->node)
1691 free_extent_buffer(root->fs_info->dev_root->node);
1693 btrfs_free_block_groups(root->fs_info);
1694 del_fs_roots(fs_info);
1696 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
1698 extent_io_tree_empty_lru(&fs_info->free_space_cache);
1699 extent_io_tree_empty_lru(&fs_info->block_group_cache);
1700 extent_io_tree_empty_lru(&fs_info->pinned_extents);
1701 extent_io_tree_empty_lru(&fs_info->pending_del);
1702 extent_io_tree_empty_lru(&fs_info->extent_ins);
1703 extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
1705 truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
1707 btrfs_stop_workers(&fs_info->workers);
1708 btrfs_stop_workers(&fs_info->endio_workers);
1709 btrfs_stop_workers(&fs_info->submit_workers);
1711 iput(fs_info->btree_inode);
1713 while(!list_empty(&fs_info->hashers)) {
1714 struct btrfs_hasher *hasher;
1715 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
1717 list_del(&hasher->list);
1718 crypto_free_hash(hasher->hash_tfm);
1722 btrfs_close_devices(fs_info->fs_devices);
1723 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1725 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1726 bdi_destroy(&fs_info->bdi);
1729 kfree(fs_info->extent_root);
1730 kfree(fs_info->tree_root);
1731 kfree(fs_info->chunk_root);
1732 kfree(fs_info->dev_root);
1736 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
1739 struct inode *btree_inode = buf->first_page->mapping->host;
1741 ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
1745 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
1750 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
1752 struct inode *btree_inode = buf->first_page->mapping->host;
1753 return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
1757 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
1759 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1760 u64 transid = btrfs_header_generation(buf);
1761 struct inode *btree_inode = root->fs_info->btree_inode;
1763 WARN_ON(!btrfs_tree_locked(buf));
1764 if (transid != root->fs_info->generation) {
1765 printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
1766 (unsigned long long)buf->start,
1767 transid, root->fs_info->generation);
1770 set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
1773 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
1776 * looks as though older kernels can get into trouble with
1777 * this code; they end up stuck in balance_dirty_pages forever
1779 struct extent_io_tree *tree;
1782 unsigned long thresh = 16 * 1024 * 1024;
1783 tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
1785 if (current_is_pdflush())
1788 num_dirty = count_range_bits(tree, &start, (u64)-1,
1789 thresh, EXTENT_DIRTY);
1790 if (num_dirty > thresh) {
1791 balance_dirty_pages_ratelimited_nr(
1792 root->fs_info->btree_inode->i_mapping, 1);
1797 void btrfs_set_buffer_defrag(struct extent_buffer *buf)
1799 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1800 struct inode *btree_inode = root->fs_info->btree_inode;
1801 set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
1802 buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
1805 void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
1807 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1808 struct inode *btree_inode = root->fs_info->btree_inode;
1809 set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
1810 buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
1814 int btrfs_buffer_defrag(struct extent_buffer *buf)
1816 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1817 struct inode *btree_inode = root->fs_info->btree_inode;
1818 return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
1819 buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
1822 int btrfs_buffer_defrag_done(struct extent_buffer *buf)
1824 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1825 struct inode *btree_inode = root->fs_info->btree_inode;
1826 return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
1827 buf->start, buf->start + buf->len - 1,
1828 EXTENT_DEFRAG_DONE, 0);
1831 int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
1833 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1834 struct inode *btree_inode = root->fs_info->btree_inode;
1835 return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
1836 buf->start, buf->start + buf->len - 1,
1837 EXTENT_DEFRAG_DONE, GFP_NOFS);
1840 int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
1842 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1843 struct inode *btree_inode = root->fs_info->btree_inode;
1844 return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
1845 buf->start, buf->start + buf->len - 1,
1846 EXTENT_DEFRAG, GFP_NOFS);
1849 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
1851 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1853 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1855 buf->flags |= EXTENT_UPTODATE;
1860 static struct extent_io_ops btree_extent_io_ops = {
1861 .writepage_io_hook = btree_writepage_io_hook,
1862 .readpage_end_io_hook = btree_readpage_end_io_hook,
1863 .submit_bio_hook = btree_submit_bio_hook,
1864 /* note we're sharing with inode.c for the merge bio hook */
1865 .merge_bio_hook = btrfs_merge_bio_hook,