/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include <linux/workqueue.h>
#include <linux/kthread.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
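
/*
 * sanity check a tree block: the block number stored in the header must
 * match the location we actually read the buffer from, otherwise the
 * block is corrupt or was read from the wrong place
 */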
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
        if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
                printk(KERN_CRIT "btrfs: buf blocknr %llu does not match "
                       "header blocknr %llu\n",
                       (unsigned long long)extent_buffer_blocknr(buf),
                       (unsigned long long)btrfs_header_blocknr(buf));
                BUG();
        }
        return 0;
}
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
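
/*
 * end_io_wq carries everything needed to finish an IO in task context
 * after the interrupt-time end_io handler fires; async_submit_bio is the
 * matching per-bio unit of work for the async submission helpers below
 */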
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_hook;
        int rw;
        int mirror_num;
        struct btrfs_work work;
};
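
/*
 * the btree inode's get_extent callback: metadata blocks are mapped 1:1
 * onto the devices, so a single cached extent_map spanning the whole
 * address space is handed back for every lookup
 */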
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
                                    size_t page_offset, u64 start, u64 len,
                                    int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                spin_unlock(&em_tree->lock);
                goto out;
        }
        spin_unlock(&em_tree->lock);

        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                printk("failed to insert %Lu %Lu -> %Lu into tree\n",
                       em->start, em->len, em->block_start);
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        printk("after failing, found %Lu %Lu %Lu\n",
                               em->start, em->len, em->block_start);
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        if (em) {
                                printk("double failure lookup gives us "
                                       "%Lu %Lu -> %Lu\n", em->start,
                                       em->len, em->block_start);
                                free_extent_map(em);
                        }
                        ret = -EIO;
                }
        } else if (ret) {
                free_extent_map(em);
                em = NULL;
        }
        spin_unlock(&em_tree->lock);

        if (ret)
                em = ERR_PTR(ret);
out:
        return em;
}
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        *(__le32 *)result = ~cpu_to_le32(crc);
}
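
/*
 * compute or verify the crc32c of a tree block.  The checksum lives in
 * the first BTRFS_CRC32_SIZE bytes of the block and covers everything
 * after the csum area.  With verify set, a mismatch is reported and the
 * block is rejected instead of having its csum rewritten.
 */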
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        char result[BTRFS_CRC32_SIZE];
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *map_token = NULL;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                                &map_token, &kaddr,
                                                &map_start, &map_len, KM_USER0);
                if (err) {
                        printk("failed to map extent buffer! %lu\n",
                               offset);
                        return 1;
                }
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
                unmap_extent_buffer(buf, map_token, KM_USER0);
        }
        btrfs_csum_final(crc, result);

        if (verify) {
                int from_this_trans = 0;

                if (root->fs_info->running_transaction &&
                    btrfs_header_generation(buf) ==
                    root->fs_info->running_transaction->transid)
                        from_this_trans = 1;

                /* FIXME, this is not good */
                if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, BTRFS_CRC32_SIZE);

                        read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
                        printk("btrfs: %s checksum verify failed on %llu "
                               "wanted %X found %X from_this_trans %d "
                               "level %d\n",
                               root->fs_info->sb->s_id,
                               buf->start, val, found, from_this_trans,
                               btrfs_header_level(buf));
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
        }
        return 0;
}
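
/*
 * a block that survived a crash can carry a stale generation number.
 * When the parent records the transid of the child it expects, make sure
 * the block we actually read belongs to that transaction before
 * trusting it.
 */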
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid)
{
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
        if (extent_buffer_uptodate(io_tree, eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk("parent transid verify failed on %llu wanted %llu found %llu\n",
               (unsigned long long)eb->start,
               (unsigned long long)parent_transid,
               (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(io_tree, eb);
out:
        unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
                      GFP_NOFS);
        return ret;
}
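
/*
 * read a tree block, retrying the other mirrors when the read or the
 * parent transid check fails, until we run out of copies
 */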
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;

        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;

                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        return ret;

                mirror_num++;
                if (mirror_num > num_copies)
                        return ret;
        }
        return -EIO;
}
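
/*
 * checksum a dirty tree block before IO.  page->private encodes the
 * length of the extent buffer that begins on this page
 */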
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        int ret;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        if (len == 0) {
                WARN_ON(1);
        }
        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
        ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                                             btrfs_header_generation(eb));
        BUG_ON(ret);
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
                       start, found_start, len);
                WARN_ON(1);
                goto err;
        }
        if (eb->first_page != page) {
                printk("bad first page %lu %lu\n", eb->first_page->index,
                       page->index);
                WARN_ON(1);
                goto err;
        }
        if (!PageUptodate(page)) {
                printk("csum not up to date page %lu\n", page->index);
                WARN_ON(1);
                goto err;
        }
        found_level = btrfs_header_level(eb);
        spin_lock(&root->fs_info->hash_lock);
        btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
        spin_unlock(&root->fs_info->hash_lock);
        csum_tree_block(root, eb, 0);
err:
        free_extent_buffer(eb);
out:
        return 0;
}
static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        csum_dirty_buffer(root, page);
        return 0;
}
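
/*
 * validate a tree block after a read completes: the header bytenr must
 * match where we read from, the fsid must match this filesystem, and the
 * checksum must be correct
 */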
int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        if (len == 0) {
                WARN_ON(1);
        }
        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                ret = -EIO;
                goto err;
        }
        if (eb->first_page != page) {
                printk("bad first page %lu %lu\n", eb->first_page->index,
                       page->index);
                WARN_ON(1);
                ret = -EIO;
                goto err;
        }
        if (memcmp_extent_buffer(eb, root->fs_info->fsid,
                                 (unsigned long)btrfs_header_fsid(eb),
                                 BTRFS_FSID_SIZE)) {
                printk("bad fsid on block %Lu\n", eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        ret = csum_tree_block(root, eb, 1);
        if (ret)
                ret = -EIO;

        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
        release_extent_buffer_tail_pages(eb);
err:
        free_extent_buffer(eb);
out:
        return ret;
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
#else
static int end_workqueue_bio(struct bio *bio,
                             unsigned int bytes_done, int err)
#endif
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;
        btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}
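
/*
 * redirect a bio's completion into the endio worker threads so csum
 * verification can run in task context instead of interrupt context
 */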
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
static void run_one_async_submit(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;
        atomic_dec(&fs_info->nr_async_submits);
        async->submit_bio_hook(async->inode, async->rw, async->bio,
                               async->mirror_num);
        kfree(async);
}
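
/*
 * queue a bio for submission by the worker threads; the hook fires later
 * in run_one_async_submit, which lets checksumming of writes happen in
 * parallel across all CPUs
 */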
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        extent_submit_bio_hook_t *submit_bio_hook)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_hook = submit_bio_hook;
        async->work.func = run_one_async_submit;
        async->work.flags = 0;
        atomic_inc(&fs_info->nr_async_submits);
        btrfs_queue_worker(&fs_info->workers, &async->work);
        return 0;
}
static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                   int mirror_num)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 offset;
        int ret;

        offset = bio->bi_sector << 9;

        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        if (rw & (1 << BIO_RW)) {
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 1);
        }

        /*
         * called for a read, do the setup so that checksum validation
         * can happen in the async kernel threads
         */
        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
        BUG_ON(ret);

        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num)
{
        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        if (!(rw & (1 << BIO_RW))) {
                return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
        }
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num,
                                   __btree_submit_bio_hook);
}
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_write_full_page(tree, page, btree_get_extent, wbc);
}
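
/*
 * skip small background flushes of the btree: pdflush only writes once
 * 96MB of metadata is dirty, other WB_SYNC_NONE callers once 8MB is
 */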
static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                u64 num_dirty;
                u64 start = 0;
                unsigned long thresh = 96 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                if (current_is_pdflush()) {
                        thresh = 96 * 1024 * 1024;
                } else {
                        thresh = 8 * 1024 * 1024;
                }
                num_dirty = count_range_bits(tree, &start, (u64)-1,
                                             thresh, EXTENT_DIRTY);
                if (num_dirty < thresh) {
                        return 0;
                }
        }
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
}
int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent);
}
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *map;
        int ret;

        if (page_count(page) > 3) {
                /* once for page->private, once for the caller, once
                 * for the page cache
                 */
                return 0;
        }
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;
        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (ret == 1) {
                invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
        return ret;
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}
#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}
#endif
static struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
        .sync_page      = block_sync_page,
};
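
/*
 * start an async read of a tree block; nobody waits for the result, the
 * pages just land in the btree inode's page cache for later use
 */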
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, 0, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize, GFP_NOFS);
        return eb;
}
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                   u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize, NULL, GFP_NOFS);
        return eb;
}
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree;
        int ret;

        io_tree = &BTRFS_I(btree_inode)->io_tree;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
        }
        return buf;
}
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                WARN_ON(!btrfs_tree_locked(buf));
                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
        }
        return 0;
}
int wait_on_tree_block_writeback(struct btrfs_root *root,
                                 struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
                                        buf);
        return 0;
}
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u32 stripesize, struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;

        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;
        root->name = NULL;
        root->in_sysfs = 0;

        INIT_LIST_HEAD(&root->dirty_list);
        spin_lock_init(&root->node_lock);
        mutex_init(&root->objectid_mutex);
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->defrag_level = 0;
        root->root_key.objectid = objectid;
        return 0;
}
static int find_and_setup_root(struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;
        u32 blocksize;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, 0);
        BUG_ON(!root->node);
        return 0;
}
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 highest_inode;
        u32 blocksize;
        int ret = 0;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = path->nodes[0];
        read_extent_buffer(l, &root->root_item,
                           btrfs_item_ptr_offset(l, path->slots[0]),
                           sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, 0);
        BUG_ON(!root->node);
insert:
        ret = btrfs_find_highest_inode(root, &highest_inode);
        if (ret == 0) {
                root->highest_inode = highest_inode;
                root->last_inode_alloc = highest_inode;
        }
        return root;
}
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                                        u64 root_objectid)
{
        struct btrfs_root *root;

        if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)root_objectid);
        return root;
}
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info, location);
        if (IS_ERR(root))
                return root;
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid, root);
        BUG_ON(ret);

        return root;
}
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location,
                                      const char *name, int namelen)
{
        struct btrfs_root *root;
        int ret;

        root = btrfs_read_fs_root_no_name(fs_info, location);
        if (!root)
                return NULL;

        if (root->in_sysfs)
                return root;

        ret = btrfs_set_root_name(root, name, namelen);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }

        ret = btrfs_sysfs_add_root(root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root->name);
                kfree(root);
                return ERR_PTR(ret);
        }
        root->in_sysfs = 1;
        return root;
}
static int add_hasher(struct btrfs_fs_info *info, char *type)
{
        struct btrfs_hasher *hasher;

        hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
        if (!hasher)
                return -ENOMEM;
        hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hasher->hash_tfm)) {
                kfree(hasher);
                return -EINVAL;
        }
        spin_lock(&info->hash_lock);
        list_add(&hasher->list, &info->hashers);
        spin_unlock(&info->hash_lock);
        return 0;
}
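
/*
 * backing_dev congestion callback: report congestion when the async
 * submit queue is deep or when any underlying device is congested
 */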
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        int limit = 256 * info->fs_devices->open_devices;
        struct list_head *cur;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        if ((bdi_bits & (1 << BDI_write_congested)) &&
            atomic_read(&info->nr_async_submits) > limit) {
                return 1;
        }

        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        return ret;
}
/*
 * this unplugs every device on the box, and it is only used when page
 * is NULL
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct list_head *cur;
        struct btrfs_device *device;
        struct btrfs_fs_info *info;

        info = (struct btrfs_fs_info *)bdi->unplug_io_data;
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi->unplug_io_fn) {
                        bdi->unplug_io_fn(bdi, page);
                }
        }
}
void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct inode *inode;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct address_space *mapping;
        u64 offset;

        /* the generic O_DIRECT read code does this */
        if (!page) {
                __unplug_io_fn(bdi, page);
                return;
        }

        /*
         * page->mapping may change at any time.  Get a consistent copy
         * and use that for everything below
         */
        smp_mb();
        mapping = page->mapping;
        if (!mapping)
                return;

        inode = mapping->host;
        offset = page_offset(page);

        em_tree = &BTRFS_I(inode)->extent_tree;
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
        spin_unlock(&em_tree->lock);
        if (!em)
                return;

        offset = offset - em->start;
        btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
                          em->block_start + offset, page);
        free_extent_map(em);
}
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_init(bdi);
#endif
        bdi->ra_pages = default_backing_dev_info.ra_pages;
        bdi->capabilities = default_backing_dev_info.capabilities;
        bdi->unplug_io_fn = btrfs_unplug_io_fn;
        bdi->unplug_io_data = info;
        bdi->congested_fn = btrfs_congested_fn;
        bdi->congested_data = info;
        return 0;
}
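
/*
 * a tree block can span pages and therefore multiple bios; before csum
 * verification can run, every page of the block must be up to date.
 * This checks whether the block that ends in this bio is fully readable.
 */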
static int bio_ready_for_csum(struct bio *bio)
{
        u64 length = 0;
        u64 buf_len = 0;
        u64 start = 0;
        struct page *page;
        struct extent_io_tree *io_tree = NULL;
        struct btrfs_fs_info *info = NULL;
        struct bio_vec *bvec;
        int i;
        int ret;

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page->private == EXTENT_PAGE_PRIVATE) {
                        length += bvec->bv_len;
                        continue;
                }
                if (!page->private) {
                        length += bvec->bv_len;
                        continue;
                }
                length = bvec->bv_len;
                buf_len = page->private >> 2;
                start = page_offset(page) + bvec->bv_offset;
                io_tree = &BTRFS_I(page->mapping->host)->io_tree;
                info = BTRFS_I(page->mapping->host)->root->fs_info;
        }
        /* are we fully contained in this bio? */
        if (buf_len <= length)
                return 1;

        ret = extent_range_uptodate(io_tree, start + length,
                                    start + buf_len - 1);
        return ret;
}
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct end_io_wq *end_io_wq;
        struct btrfs_fs_info *fs_info;
        int error;

        end_io_wq = container_of(work, struct end_io_wq, work);
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

        /* metadata bios are special because the whole tree block must
         * be checksummed at once.  This makes sure the entire block is in
         * ram and up to date before trying to verify things.  For
         * blocksize <= pagesize, it is basically a noop
         */
        if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_workers,
                                   &end_io_wq->work);
                return;
        }
        error = end_io_wq->error;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        bio_endio(bio, bio->bi_size, error);
#else
        bio_endio(bio, error);
#endif
}
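
/*
 * the cleaner kthread drops old snapshots in the background; the
 * transaction kthread after it forces a commit roughly every 30 seconds
 */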
static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->cleaner_mutex);
                btrfs_clean_old_snapshots(root);
                mutex_unlock(&root->fs_info->cleaner_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        smp_mb();
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        unsigned long now;
        unsigned long delay;
        int ret;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                delay = HZ * 30;
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);

                mutex_lock(&root->fs_info->trans_mutex);
                cur = root->fs_info->running_transaction;
                if (!cur) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        goto sleep;
                }
                now = get_seconds();
                if (now < cur->start_time || now - cur->start_time < 30) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        delay = HZ * 5;
                        goto sleep;
                }
                mutex_unlock(&root->fs_info->trans_mutex);
                trans = btrfs_start_transaction(root, 1);
                ret = btrfs_commit_transaction(trans, root);
sleep:
                wake_up_process(root->fs_info->cleaner_kthread);
                mutex_unlock(&root->fs_info->transaction_kthread_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(delay);
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
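
/*
 * mount-time setup: read the superblock, bring up the worker threads,
 * read the chunk and device trees, then the fs tree roots.  Everything
 * the rest of the filesystem needs hangs off the fs_info built here.
 */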
struct btrfs_root *open_ctree(struct super_block *sb,
                              struct btrfs_fs_devices *fs_devices,
                              char *options)
{
        u32 sectorsize;
        u32 nodesize;
        u32 leafsize;
        u32 blocksize;
        u32 stripesize;
        struct buffer_head *bh;
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
                                                GFP_NOFS);
        struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
                                              GFP_NOFS);
        int ret;
        int err = -EINVAL;
        struct btrfs_super_block *disk_super;

        if (!extent_root || !tree_root || !fs_info) {
                err = -ENOMEM;
                goto fail;
        }
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->hashers);
        spin_lock_init(&fs_info->hash_lock);
        spin_lock_init(&fs_info->delalloc_lock);
        spin_lock_init(&fs_info->new_trans_lock);

        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->chunk_root = chunk_root;
        fs_info->dev_root = dev_root;
        fs_info->fs_devices = fs_devices;
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
        btrfs_mapping_init(&fs_info->mapping_tree);
        atomic_set(&fs_info->nr_async_submits, 0);
        atomic_set(&fs_info->throttles, 0);
        fs_info->sb = sb;
        fs_info->max_extent = (u64)-1;
        fs_info->max_inline = 8192 * 1024;
        setup_bdi(fs_info, &fs_info->bdi);
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);

        /*
         * we set the i_size on the btree inode to the max possible int.
         * the real end of the address space is determined by all of
         * the devices in the system
         */
        fs_info->btree_inode->i_size = OFFSET_MAX;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
                            fs_info->btree_inode->i_mapping,
                            GFP_NOFS);
        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
                             GFP_NOFS);

        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

        extent_io_tree_init(&fs_info->free_space_cache,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->block_group_cache,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->pinned_extents,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->pending_del,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->extent_ins,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->do_barriers = 1;

        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        insert_inode_hash(fs_info->btree_inode);
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->drop_mutex);
        mutex_init(&fs_info->alloc_mutex);
        mutex_init(&fs_info->chunk_mutex);
        mutex_init(&fs_info->transaction_kthread_mutex);
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);

        ret = add_hasher(fs_info, "crc32c");
        if (ret) {
                printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
                err = -ENOMEM;
                goto fail_iput;
        }

        __setup_root(4096, 4096, 4096, 4096, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
        bh = __bread(fs_devices->latest_bdev,
                     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
        if (!bh)
                goto fail_iput;

        memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
        brelse(bh);

        memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                goto fail_sb_buffer;

        err = btrfs_parse_options(tree_root, options);
        if (err)
                goto fail_sb_buffer;
        /*
         * we need to start all the end_io workers up front because the
         * queue work function gets called at interrupt time, and so it
         * cannot dynamically grow.
         */
        btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->workers, 1);
        btrfs_start_workers(&fs_info->submit_workers, 1);
        btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
        if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
                printk("Btrfs: wanted %llu devices, but found %llu\n",
                       (unsigned long long)btrfs_super_num_devices(disk_super),
                       (unsigned long long)fs_devices->open_devices);
                if (btrfs_test_opt(tree_root, DEGRADED))
                        printk("continuing in degraded mode\n");
                else
                        goto fail_sb_buffer;
        }

        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        nodesize = btrfs_super_nodesize(disk_super);
        leafsize = btrfs_super_leafsize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
        stripesize = btrfs_super_stripesize(disk_super);
        tree_root->nodesize = nodesize;
        tree_root->leafsize = leafsize;
        tree_root->sectorsize = sectorsize;
        tree_root->stripesize = stripesize;

        sb->s_blocksize = sectorsize;
        sb->s_blocksize_bits = blksize_bits(sectorsize);

        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                printk("btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_sys_array(tree_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk("btrfs: failed to read the system array on %s\n",
                       sb->s_id);
                goto fail_sys_array;
        }

        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_chunk_root_level(disk_super));

        __setup_root(nodesize, leafsize, sectorsize, stripesize,
                     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

        chunk_root->node = read_tree_block(chunk_root,
                                           btrfs_super_chunk_root(disk_super),
                                           blocksize, 0);
        BUG_ON(!chunk_root->node);

        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
                 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
                 BTRFS_UUID_SIZE);

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
        mutex_unlock(&fs_info->chunk_mutex);
        BUG_ON(ret);

        btrfs_close_extra_devices(fs_devices);
        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_root_level(disk_super));

        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super),
                                          blocksize, 0);
        if (!tree_root->node)
                goto fail_sb_buffer;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret)
                goto fail_tree_root;
        extent_root->track_dirty = 1;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_DEV_TREE_OBJECTID, dev_root);
        dev_root->track_dirty = 1;
        if (ret)
                goto fail_extent_root;

        btrfs_read_block_groups(extent_root);
        fs_info->generation = btrfs_super_generation(disk_super) + 1;
        fs_info->data_alloc_profile = (u64)-1;
        fs_info->metadata_alloc_profile = (u64)-1;
        fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (IS_ERR(fs_info->cleaner_kthread))
                goto fail_extent_root;

        fs_info->transaction_kthread = kthread_run(transaction_kthread,
                                                   tree_root,
                                                   "btrfs-transaction");
        if (IS_ERR(fs_info->transaction_kthread))
                goto fail_cleaner;

        return tree_root;
fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
        free_extent_buffer(extent_root->node);
fail_tree_root:
        free_extent_buffer(tree_root->node);
fail_sys_array:
fail_sb_buffer:
        extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
        iput(fs_info->btree_inode);
fail:
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
#endif

        kfree(extent_root);
        kfree(tree_root);
        kfree(fs_info);
        kfree(chunk_root);
        kfree(dev_root);
        return ERR_PTR(err);
}
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        printk(KERN_WARNING "lost page write due to "
                               "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
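
/*
 * write the super block to every device in the FS, filling in this
 * device's dev_item each time.  With multiple copies, up to
 * num_devices - 1 write failures can be tolerated before giving up.
 */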
int write_all_supers(struct btrfs_root *root)
{
        struct list_head *cur;
        struct list_head *head = &root->fs_info->fs_devices->devices;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        struct buffer_head *bh;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
        u32 crc;
        u64 flags;

        max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);

        sb = &root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (!dev->bdev) {
                        total_errors++;
                        continue;
                }
                if (!dev->in_fs_metadata)
                        continue;

                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

                crc = ~(u32)0;
                crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
                                      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, sb->csum);

                bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
                              BTRFS_SUPER_INFO_SIZE);

                memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
                dev->pending_io = bh;

                get_bh(bh);
                set_buffer_uptodate(bh);
                lock_buffer(bh);
                bh->b_end_io = btrfs_end_buffer_write_sync;

                if (do_barriers && dev->barriers) {
                        ret = submit_bh(WRITE_BARRIER, bh);
                        if (ret == -EOPNOTSUPP) {
                                printk("btrfs: disabling barriers on dev %s\n",
                                       dev->name);
                                set_buffer_uptodate(bh);
                                dev->barriers = 0;
                                get_bh(bh);
                                lock_buffer(bh);
                                ret = submit_bh(WRITE, bh);
                        }
                } else {
                        ret = submit_bh(WRITE, bh);
                }
                if (ret)
                        total_errors++;
        }
        if (total_errors > max_errors) {
                printk("btrfs: %d errors while writing supers\n", total_errors);
                BUG();
        }

        total_errors = 0;
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (!dev->bdev)
                        continue;
                if (!dev->in_fs_metadata)
                        continue;

                BUG_ON(!dev->pending_io);
                bh = dev->pending_io;
                wait_on_buffer(bh);
                if (!buffer_uptodate(dev->pending_io)) {
                        if (do_barriers && dev->barriers) {
                                printk("btrfs: disabling barriers on dev %s\n",
                                       dev->name);
                                set_buffer_uptodate(bh);
                                get_bh(bh);
                                lock_buffer(bh);
                                dev->barriers = 0;
                                bh->b_end_io = btrfs_end_buffer_write_sync;
                                ret = submit_bh(WRITE, bh);
                                if (ret)
                                        total_errors++;
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh))
                                        total_errors++;
                        } else {
                                total_errors++;
                        }
                }
                dev->pending_io = NULL;
                brelse(bh);
        }
        if (total_errors > max_errors) {
                printk("btrfs: %d errors while writing supers\n", total_errors);
                BUG();
        }
        return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root)
{
        int ret;

        ret = write_all_supers(root);
        return ret;
}
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        if (root->in_sysfs)
                btrfs_sysfs_del_root(root);
        if (root->node)
                free_extent_buffer(root->node);
        if (root->commit_root)
                free_extent_buffer(root->commit_root);
        if (root->name)
                kfree(root->name);
        kfree(root);
        return 0;
}
static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        int i;
        struct btrfs_root *gang[8];

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        btrfs_free_fs_root(fs_info, gang[i]);
        }
        return 0;
}
int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        fs_info->closing = 1;
        smp_mb();

        kthread_stop(root->fs_info->transaction_kthread);
        kthread_stop(root->fs_info->cleaner_kthread);
1663 trans = btrfs_start_transaction(root, 1);
1664 ret = btrfs_commit_transaction(trans, root);
1665 /* run commit again to drop the original snapshot */
1666 trans = btrfs_start_transaction(root, 1);
1667 btrfs_commit_transaction(trans, root);
1668 ret = btrfs_write_and_wait_transaction(NULL, root);
1671 write_ctree_super(NULL, root);
1673 if (fs_info->delalloc_bytes) {
1674 printk("btrfs: at unmount delalloc count %Lu\n",
1675 fs_info->delalloc_bytes);
        if (fs_info->extent_root->node)
                free_extent_buffer(fs_info->extent_root->node);

        if (fs_info->tree_root->node)
                free_extent_buffer(fs_info->tree_root->node);

        if (root->fs_info->chunk_root->node)
                free_extent_buffer(root->fs_info->chunk_root->node);

        if (root->fs_info->dev_root->node)
                free_extent_buffer(root->fs_info->dev_root->node);
        btrfs_free_block_groups(root->fs_info);
        del_fs_roots(fs_info);

        filemap_write_and_wait(fs_info->btree_inode->i_mapping);

        extent_io_tree_empty_lru(&fs_info->free_space_cache);
        extent_io_tree_empty_lru(&fs_info->block_group_cache);
        extent_io_tree_empty_lru(&fs_info->pinned_extents);
        extent_io_tree_empty_lru(&fs_info->pending_del);
        extent_io_tree_empty_lru(&fs_info->extent_ins);
        extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);

        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->submit_workers);

        iput(fs_info->btree_inode);
        while (!list_empty(&fs_info->hashers)) {
                struct btrfs_hasher *hasher;
                hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
                                    list);
                list_del(&hasher->list);
                crypto_free_hash(hasher->hash_tfm);
                kfree(hasher);
        }
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
#endif

        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        kfree(fs_info->chunk_root);
        kfree(fs_info->dev_root);
        kfree(fs_info);
        return 0;
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
        int ret;
        struct inode *btree_inode = buf->first_page->mapping->host;

        ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
        if (!ret)
                return ret;

        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
                                    parent_transid);
        return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
        struct inode *btree_inode = buf->first_page->mapping->host;
        return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        u64 transid = btrfs_header_generation(buf);
        struct inode *btree_inode = root->fs_info->btree_inode;

        WARN_ON(!btrfs_tree_locked(buf));
        if (transid != root->fs_info->generation) {
                printk(KERN_CRIT "btrfs: transid mismatch buffer %llu, "
                       "found %llu running %llu\n",
                       (unsigned long long)buf->start,
                       (unsigned long long)transid,
                       (unsigned long long)root->fs_info->generation);
                WARN_ON(1);
        }
        set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
        /*
         * looks as though older kernels can get into trouble with
         * this code, they end up stuck in balance_dirty_pages forever
         */
        struct extent_io_tree *tree;
        u64 num_dirty;
        u64 start = 0;
        unsigned long thresh = 16 * 1024 * 1024;
        tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

        if (current_is_pdflush())
                return;

        num_dirty = count_range_bits(tree, &start, (u64)-1,
                                     thresh, EXTENT_DIRTY);
        if (num_dirty > thresh) {
                balance_dirty_pages_ratelimited_nr(
                                root->fs_info->btree_inode->i_mapping, 1);
        }
        return;
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        int ret;
        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
        }
        return ret;
}
static struct extent_io_ops btree_extent_io_ops = {
        .writepage_io_hook = btree_writepage_io_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
};