Btrfs: Search data ordered extents first for checksums on read
linux-2.6-block.git: fs/btrfs/disk-io.c
index 46188ee166236c0f240f3107f0f8d2b958b51b02..7ce3f83c5dd633bab76b3a535361b28013b134d3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/version.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h> /* for block_sync_page */
 #include <linux/workqueue.h>
+#include <linux/kthread.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+# include <linux/freezer.h>
+#else
+# include <linux/sched.h>
+#endif
 #include "crc32c.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -31,6 +38,8 @@
 #include "btrfs_inode.h"
 #include "volumes.h"
 #include "print-tree.h"
+#include "async-thread.h"
+#include "locking.h"
 
 #if 0
 static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
@@ -46,8 +55,7 @@ static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
 #endif
 
 static struct extent_io_ops btree_extent_io_ops;
-static struct workqueue_struct *end_io_workqueue;
-static struct workqueue_struct *async_submit_workqueue;
+static void end_workqueue_fn(struct btrfs_work *work);
 
 struct end_io_wq {
        struct bio *bio;
@@ -57,6 +65,7 @@ struct end_io_wq {
        int error;
        int metadata;
        struct list_head list;
+       struct btrfs_work work;
 };
 
 struct async_submit_bio {
@@ -66,6 +75,7 @@ struct async_submit_bio {
        extent_submit_bio_hook_t *submit_bio_hook;
        int rw;
        int mirror_num;
+       struct btrfs_work work;
 };
 
 struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
@@ -78,9 +88,13 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
 
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
-       spin_unlock(&em_tree->lock);
-       if (em)
+       if (em) {
+               em->bdev =
+                       BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+               spin_unlock(&em_tree->lock);
                goto out;
+       }
+       spin_unlock(&em_tree->lock);
 
        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
@@ -90,7 +104,7 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
        em->start = 0;
        em->len = (u64)-1;
        em->block_start = 0;
-       em->bdev = inode->i_sb->s_bdev;
+       em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
@@ -201,9 +215,36 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
        return 0;
 }
 
+static int verify_parent_transid(struct extent_io_tree *io_tree,
+                                struct extent_buffer *eb, u64 parent_transid)
+{
+       int ret;
+
+       if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
+               return 0;
+
+       lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
+       if (extent_buffer_uptodate(io_tree, eb) &&
+           btrfs_header_generation(eb) == parent_transid) {
+               ret = 0;
+               goto out;
+       }
+       printk("parent transid verify failed on %llu wanted %llu found %llu\n",
+              (unsigned long long)eb->start,
+              (unsigned long long)parent_transid,
+              (unsigned long long)btrfs_header_generation(eb));
+       ret = 1;
+out:
+       clear_extent_buffer_uptodate(io_tree, eb);
+       unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
+                     GFP_NOFS);
+       return ret;
+
+}
+
 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
-                                         u64 start)
+                                         u64 start, u64 parent_transid)
 {
        struct extent_io_tree *io_tree;
        int ret;
@@ -214,25 +255,19 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                                               btree_get_extent, mirror_num);
-               if (!ret) {
-                       if (mirror_num)
-printk("good read %Lu mirror %d total %d\n", eb->start, mirror_num, num_copies);
+               if (!ret &&
+                   !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;
-               }
+
                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
-printk("failed to read %Lu mirror %d total %d\n", eb->start, mirror_num, num_copies);
-               if (num_copies == 1) {
-printk("reading %Lu failed only one copy\n", eb->start);
+               if (num_copies == 1)
                        return ret;
-               }
+
                mirror_num++;
-               if (mirror_num > num_copies) {
-printk("bailing at mirror %d of %d\n", mirror_num, num_copies);
+               if (mirror_num > num_copies)
                        return ret;
-               }
        }
-printk("read extent buffer page last\n");
        return -EIO;
 }
 
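The retry loop above is the generic shape for reading from a multi-copy filesystem: try one mirror, and on a checksum or parent-transid failure bump the mirror number until the copies are exhausted. A minimal sketch of the same pattern, with hypothetical read_copy() and copies_for() helpers standing in for read_extent_buffer_pages() and btrfs_num_copies():

static int read_with_mirror_retries(u64 start, u64 len)
{
	int mirror_num = 0;
	int num_copies;
	int ret;

	while (1) {
		/* hypothetical helper: returns 0 when this copy read back
		 * clean (checksum and parent transid both matched) */
		ret = read_copy(start, len, mirror_num);
		if (!ret)
			return 0;

		/* hypothetical stand-in for btrfs_num_copies() */
		num_copies = copies_for(start, len);
		if (num_copies == 1)
			return ret;	/* single copy, nothing to retry */

		mirror_num++;
		if (mirror_num > num_copies)
			return ret;	/* every mirror failed */
	}
}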
@@ -257,9 +292,9 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
                WARN_ON(1);
        }
        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
-       ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE);
+       ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
+                                            btrfs_header_generation(eb));
        BUG_ON(ret);
-       btrfs_clear_buffer_defrag(eb);
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
@@ -319,10 +354,8 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
        }
        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
 
-       btrfs_clear_buffer_defrag(eb);
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
-printk("bad start on %Lu found %Lu\n", eb->start, found_start);
                ret = -EIO;
                goto err;
        }
@@ -333,6 +366,13 @@ printk("bad start on %Lu found %Lu\n", eb->start, found_start);
                ret = -EIO;
                goto err;
        }
+       if (memcmp_extent_buffer(eb, root->fs_info->fsid,
+                                (unsigned long)btrfs_header_fsid(eb),
+                                BTRFS_FSID_SIZE)) {
+               printk("bad fsid on block %Lu\n", eb->start);
+               ret = -EIO;
+               goto err;
+       }
        found_level = btrfs_header_level(eb);
 
        ret = csum_tree_block(root, eb, 1);
@@ -341,7 +381,6 @@ printk("bad start on %Lu found %Lu\n", eb->start, found_start);
 
        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
-       release_extent_buffer_tail_pages(eb);
 err:
        free_extent_buffer(eb);
 out:
@@ -357,7 +396,6 @@ static int end_workqueue_bio(struct bio *bio,
 {
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;
-       unsigned long flags;
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
@@ -365,11 +403,14 @@ static int end_workqueue_bio(struct bio *bio,
 #endif
 
        fs_info = end_io_wq->info;
-       spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
        end_io_wq->error = err;
-       list_add_tail(&end_io_wq->list, &fs_info->end_io_work_list);
-       spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags);
-       queue_work(end_io_workqueue, &fs_info->end_io_work);
+       end_io_wq->work.func = end_workqueue_fn;
+       end_io_wq->work.flags = 0;
+       if (bio->bi_rw & (1 << BIO_RW))
+               btrfs_queue_worker(&fs_info->endio_write_workers,
+                                  &end_io_wq->work);
+       else
+               btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
@@ -396,19 +437,25 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
        return 0;
 }
 
+static void run_one_async_submit(struct btrfs_work *work)
+{
+       struct btrfs_fs_info *fs_info;
+       struct async_submit_bio *async;
+
+       async = container_of(work, struct async_submit_bio, work);
+       fs_info = BTRFS_I(async->inode)->root->fs_info;
+       atomic_dec(&fs_info->nr_async_submits);
+       async->submit_bio_hook(async->inode, async->rw, async->bio,
+                              async->mirror_num);
+       kfree(async);
+}
+
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        extent_submit_bio_hook_t *submit_bio_hook)
 {
        struct async_submit_bio *async;
 
-       /*
-        * inline writerback should stay inline, only hop to the async
-        * queue if we're pdflush
-        */
-       if (!current_is_pdflush())
-               return submit_bio_hook(inode, rw, bio, mirror_num);
-
        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;
@@ -418,12 +465,10 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_hook = submit_bio_hook;
-
-       spin_lock(&fs_info->async_submit_work_lock);
-       list_add_tail(&async->list, &fs_info->async_submit_work_list);
-       spin_unlock(&fs_info->async_submit_work_lock);
-
-       queue_work(async_submit_workqueue, &fs_info->async_submit_work);
+       async->work.func = run_one_async_submit;
+       async->work.flags = 0;
+       atomic_inc(&fs_info->nr_async_submits);
+       btrfs_queue_worker(&fs_info->workers, &async->work);
        return 0;
 }
 
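Both end_io_wq and async_submit_bio follow the same idiom: a struct btrfs_work is embedded in the per-request context, queued by pointer, and the worker recovers the enclosing structure with container_of(). A minimal sketch of that embedding pattern (my_ctx, my_worker_fn and handle() are illustrative names, not btrfs API):

struct my_ctx {
	struct bio *payload;
	struct btrfs_work work;		/* embedded, not a pointer */
};

static void my_worker_fn(struct btrfs_work *work)
{
	/* subtract the member offset to get back the containing struct */
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	handle(ctx->payload);		/* hypothetical payload */
	kfree(ctx);			/* the worker now owns the memory */
}

/* producer side:
 *	ctx->work.func = my_worker_fn;
 *	ctx->work.flags = 0;
 *	btrfs_queue_worker(&fs_info->workers, &ctx->work);
 */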
@@ -436,24 +481,32 @@ static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 
        offset = bio->bi_sector << 9;
 
+       /*
+        * when we're called for a write, we're already in the async
+        * submission context.  Just jump into btrfs_map_bio
+        */
        if (rw & (1 << BIO_RW)) {
-               return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num);
+               return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+                                    mirror_num, 0);
        }
 
+       /*
+        * called for a read, do the setup so that checksum validation
+        * can happen in the async kernel threads
+        */
        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
        BUG_ON(ret);
 
-       if (offset == BTRFS_SUPER_INFO_OFFSET) {
-               bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
-               submit_bio(rw, bio);
-               return 0;
-       }
-       return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num);
+       return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
 }
 
 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num)
 {
+       /*
+        * kthread helpers are used to submit writes so that checksumming
+        * can happen in parallel across all CPUs
+        */
        if (!(rw & (1 << BIO_RW))) {
                return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
        }
@@ -509,21 +562,21 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
        struct extent_map_tree *map;
        int ret;
 
-       if (page_count(page) > 3) {
-               /* once for page->private, once for the caller, once
-                * once for the page cache
-                */
-               return 0;
-       }
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;
+
        ret = try_release_extent_state(map, tree, page, gfp_flags);
+       if (!ret) {
+               return 0;
+       }
+
+       ret = try_release_extent_buffer(tree, page);
        if (ret == 1) {
-               invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
+
        return ret;
 }
 
@@ -534,7 +587,8 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
-               printk("2invalidate page cleaning up after releasepage\n");
+               printk("warning page private not zero on page %Lu\n",
+                      page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
@@ -571,7 +625,8 @@ static struct address_space_operations btree_aops = {
        .sync_page      = block_sync_page,
 };
 
-int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
+int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+                        u64 parent_transid)
 {
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
@@ -586,28 +641,6 @@ int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
        return ret;
 }
 
-static int close_all_devices(struct btrfs_fs_info *fs_info)
-{
-       struct list_head *list;
-       struct list_head *next;
-       struct btrfs_device *device;
-
-       list = &fs_info->fs_devices->devices;
-       list_for_each(next, list) {
-               device = list_entry(next, struct btrfs_device, dev_list);
-               if (device->bdev && device->bdev != fs_info->sb->s_bdev)
-                       close_bdev_excl(device->bdev);
-               device->bdev = NULL;
-       }
-       return 0;
-}
-
-int btrfs_verify_block_csum(struct btrfs_root *root,
-                           struct extent_buffer *buf)
-{
-       return btrfs_buffer_uptodate(buf);
-}
-
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
 {
@@ -631,7 +664,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
-                                     u32 blocksize)
+                                     u32 blocksize, u64 parent_transid)
 {
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
@@ -644,7 +677,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
        if (!buf)
                return NULL;
 
-       ret = btree_read_extent_buffer_pages(root, buf, 0);
+       ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
 
        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
@@ -658,9 +691,11 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 {
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
-           root->fs_info->running_transaction->transid)
+           root->fs_info->running_transaction->transid) {
+               WARN_ON(!btrfs_tree_locked(buf));
                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
+       }
        return 0;
 }
 
@@ -697,10 +732,13 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->in_sysfs = 0;
 
        INIT_LIST_HEAD(&root->dirty_list);
+       spin_lock_init(&root->node_lock);
+       mutex_init(&root->objectid_mutex);
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
+       root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->defrag_level = 0;
@@ -725,7 +763,7 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
 
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
-                                    blocksize);
+                                    blocksize, 0);
        BUG_ON(!root->node);
        return 0;
 }
@@ -781,7 +819,7 @@ out:
        }
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
-                                    blocksize);
+                                    blocksize, 0);
        BUG_ON(!root->node);
 insert:
        root->ref_cows = 1;
@@ -818,6 +856,10 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
+       if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
+               return fs_info->chunk_root;
+       if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
+               return fs_info->dev_root;
 
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
@@ -896,12 +938,20 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
+       int limit = 256 * info->fs_devices->open_devices;
        struct list_head *cur;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;
 
+       if ((bdi_bits & (1 << BDI_write_congested)) &&
+           atomic_read(&info->nr_async_submits) > limit) {
+               return 1;
+       }
+
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
+               if (!device->bdev)
+                       continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
@@ -911,7 +961,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
        return ret;
 }
 
-void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+/*
+ * this unplugs every device on the box, and it is only used when page
+ * is null
+ */
+static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
        struct list_head *cur;
        struct btrfs_device *device;
@@ -927,9 +981,55 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
        }
 }
 
+void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+{
+       struct inode *inode;
+       struct extent_map_tree *em_tree;
+       struct extent_map *em;
+       struct address_space *mapping;
+       u64 offset;
+
+       /* the generic O_DIRECT read code does this */
+       if (!page) {
+               __unplug_io_fn(bdi, page);
+               return;
+       }
+
+       /*
+        * page->mapping may change at any time.  Get a consistent copy
+        * and use that for everything below
+        */
+       smp_mb();
+       mapping = page->mapping;
+       if (!mapping)
+               return;
+
+       inode = mapping->host;
+       offset = page_offset(page);
+
+       em_tree = &BTRFS_I(inode)->extent_tree;
+       spin_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+       spin_unlock(&em_tree->lock);
+       if (!em) {
+               __unplug_io_fn(bdi, page);
+               return;
+       }
+
+       if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+               free_extent_map(em);
+               __unplug_io_fn(bdi, page);
+               return;
+       }
+       offset = offset - em->start;
+       btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
+                         em->block_start + offset, page);
+       free_extent_map(em);
+}
+
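The interesting step above is the address translation: the page's offset within the file extent is rebased onto the extent's physical start before btrfs_unplug_page() picks the right device to unplug. With made-up numbers:

/* an extent map covers file range [em->start, em->start + em->len)
 * and is stored on disk starting at em->block_start.  A page at
 * file offset 'offset' therefore lives at: */
u64 physical = em->block_start + (offset - em->start);

/* e.g. em->start = 1MB, em->block_start = 16MB, offset = 1MB + 4096
 * gives physical = 16MB + 4096 */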
 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_init(bdi);
 #endif
        bdi->ra_pages   = default_backing_dev_info.ra_pages;
@@ -981,104 +1081,130 @@ static int bio_ready_for_csum(struct bio *bio)
        return ret;
 }
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-static void btrfs_end_io_csum(void *p)
-#else
-static void btrfs_end_io_csum(struct work_struct *work)
-#endif
+/*
+ * called by the kthread helper functions to finally call the bio end_io
+ * functions.  This is where read checksum verification actually happens
+ */
+static void end_workqueue_fn(struct btrfs_work *work)
 {
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-       struct btrfs_fs_info *fs_info = p;
-#else
-       struct btrfs_fs_info *fs_info = container_of(work,
-                                                    struct btrfs_fs_info,
-                                                    end_io_work);
-#endif
-       unsigned long flags;
-       struct end_io_wq *end_io_wq;
        struct bio *bio;
-       struct list_head *next;
+       struct end_io_wq *end_io_wq;
+       struct btrfs_fs_info *fs_info;
        int error;
-       int was_empty;
 
-       while(1) {
-               spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
-               if (list_empty(&fs_info->end_io_work_list)) {
-                       spin_unlock_irqrestore(&fs_info->end_io_work_lock,
-                                              flags);
-                       return;
-               }
-               next = fs_info->end_io_work_list.next;
-               list_del(next);
-               spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags);
-
-               end_io_wq = list_entry(next, struct end_io_wq, list);
-
-               bio = end_io_wq->bio;
-               if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
-                       spin_lock_irqsave(&fs_info->end_io_work_lock, flags);
-                       was_empty = list_empty(&fs_info->end_io_work_list);
-                       list_add_tail(&end_io_wq->list,
-                                     &fs_info->end_io_work_list);
-                       spin_unlock_irqrestore(&fs_info->end_io_work_lock,
-                                              flags);
-                       if (was_empty)
-                               return;
-                       continue;
-               }
-               error = end_io_wq->error;
-               bio->bi_private = end_io_wq->private;
-               bio->bi_end_io = end_io_wq->end_io;
-               kfree(end_io_wq);
+       end_io_wq = container_of(work, struct end_io_wq, work);
+       bio = end_io_wq->bio;
+       fs_info = end_io_wq->info;
+
+       /* metadata bios are special because the whole tree block must
+        * be checksummed at once.  This makes sure the entire block is in
+        * RAM and up to date before trying to verify things.  For
+        * blocksize <= pagesize, it is basically a no-op
+        */
+       if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
+               btrfs_queue_worker(&fs_info->endio_workers,
+                                  &end_io_wq->work);
+               return;
+       }
+       error = end_io_wq->error;
+       bio->bi_private = end_io_wq->private;
+       bio->bi_end_io = end_io_wq->end_io;
+       kfree(end_io_wq);
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-               bio_endio(bio, bio->bi_size, error);
+       bio_endio(bio, bio->bi_size, error);
 #else
-               bio_endio(bio, error);
+       bio_endio(bio, error);
 #endif
-       }
 }
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-static void btrfs_async_submit_work(void *p)
-#else
-static void btrfs_async_submit_work(struct work_struct *work)
-#endif
+static int cleaner_kthread(void *arg)
 {
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-       struct btrfs_fs_info *fs_info = p;
-#else
-       struct btrfs_fs_info *fs_info = container_of(work,
-                                                    struct btrfs_fs_info,
-                                                    async_submit_work);
-#endif
-       struct async_submit_bio *async;
-       struct list_head *next;
+       struct btrfs_root *root = arg;
 
-       while(1) {
-               spin_lock(&fs_info->async_submit_work_lock);
-               if (list_empty(&fs_info->async_submit_work_list)) {
-                       spin_unlock(&fs_info->async_submit_work_lock);
-                       return;
+       do {
+               smp_mb();
+               if (root->fs_info->closing)
+                       break;
+
+               vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+               mutex_lock(&root->fs_info->cleaner_mutex);
+               btrfs_clean_old_snapshots(root);
+               mutex_unlock(&root->fs_info->cleaner_mutex);
+
+               if (freezing(current)) {
+                       refrigerator();
+               } else {
+                       smp_mb();
+                       if (root->fs_info->closing)
+                               break;
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       schedule();
+                       __set_current_state(TASK_RUNNING);
                }
-               next = fs_info->async_submit_work_list.next;
-               list_del(next);
-               spin_unlock(&fs_info->async_submit_work_lock);
-
-               async = list_entry(next, struct async_submit_bio, list);
-               async->submit_bio_hook(async->inode, async->rw, async->bio,
-                                      async->mirror_num);
-               kfree(async);
-       }
+       } while (!kthread_should_stop());
+       return 0;
+}
+
+static int transaction_kthread(void *arg)
+{
+       struct btrfs_root *root = arg;
+       struct btrfs_trans_handle *trans;
+       struct btrfs_transaction *cur;
+       unsigned long now;
+       unsigned long delay;
+       int ret;
+
+       do {
+               smp_mb();
+               if (root->fs_info->closing)
+                       break;
+
+               delay = HZ * 30;
+               vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
+               mutex_lock(&root->fs_info->transaction_kthread_mutex);
+
+               mutex_lock(&root->fs_info->trans_mutex);
+               cur = root->fs_info->running_transaction;
+               if (!cur) {
+                       mutex_unlock(&root->fs_info->trans_mutex);
+                       goto sleep;
+               }
+               now = get_seconds();
+               if (now < cur->start_time || now - cur->start_time < 30) {
+                       mutex_unlock(&root->fs_info->trans_mutex);
+                       delay = HZ * 5;
+                       goto sleep;
+               }
+               mutex_unlock(&root->fs_info->trans_mutex);
+               trans = btrfs_start_transaction(root, 1);
+               ret = btrfs_commit_transaction(trans, root);
+sleep:
+               wake_up_process(root->fs_info->cleaner_kthread);
+               mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+
+               if (freezing(current)) {
+                       refrigerator();
+               } else {
+                       if (root->fs_info->closing)
+                               break;
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       schedule_timeout(delay);
+                       __set_current_state(TASK_RUNNING);
+               }
+       } while (!kthread_should_stop());
+       return 0;
 }
 
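cleaner_kthread() and transaction_kthread() above are both instances of the stock freezer-aware kthread loop for this kernel era. Reduced to its skeleton (do_work() is a hypothetical payload):

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int my_kthread(void *arg)
{
	do {
		do_work(arg);			/* hypothetical payload */

		if (freezing(current)) {
			refrigerator();		/* park across suspend */
		} else {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ * 30);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/* start: task = kthread_run(my_kthread, data, "my-kthread");
 * stop:  kthread_stop(task); -- wakes the thread and waits for it to exit */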
 struct btrfs_root *open_ctree(struct super_block *sb,
-                             struct btrfs_fs_devices *fs_devices)
+                             struct btrfs_fs_devices *fs_devices,
+                             char *options)
 {
        u32 sectorsize;
        u32 nodesize;
        u32 leafsize;
        u32 blocksize;
        u32 stripesize;
+       struct buffer_head *bh;
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
@@ -1091,30 +1217,22 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                                              GFP_NOFS);
        int ret;
        int err = -EINVAL;
+
        struct btrfs_super_block *disk_super;
 
        if (!extent_root || !tree_root || !fs_info) {
                err = -ENOMEM;
                goto fail;
        }
-       end_io_workqueue = create_workqueue("btrfs-end-io");
-       BUG_ON(!end_io_workqueue);
-       async_submit_workqueue = create_workqueue("btrfs-async-submit");
-
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->hashers);
-       INIT_LIST_HEAD(&fs_info->end_io_work_list);
-       INIT_LIST_HEAD(&fs_info->async_submit_work_list);
        spin_lock_init(&fs_info->hash_lock);
-       spin_lock_init(&fs_info->end_io_work_lock);
-       spin_lock_init(&fs_info->async_submit_work_lock);
        spin_lock_init(&fs_info->delalloc_lock);
        spin_lock_init(&fs_info->new_trans_lock);
 
        init_completion(&fs_info->kobj_unregister);
-       sb_set_blocksize(sb, BTRFS_SUPER_INFO_SIZE);
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->chunk_root = chunk_root;
@@ -1123,6 +1241,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
        btrfs_mapping_init(&fs_info->mapping_tree);
+       atomic_set(&fs_info->nr_async_submits, 0);
+       atomic_set(&fs_info->throttles, 0);
        fs_info->sb = sb;
        fs_info->max_extent = (u64)-1;
        fs_info->max_inline = 8192 * 1024;
@@ -1130,6 +1250,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
+       fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
+
+       sb->s_blocksize = 4096;
+       sb->s_blocksize_bits = blksize_bits(4096);
 
        /*
         * we set the i_size on the btree inode to the max possible int.
@@ -1160,16 +1284,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                             fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->do_barriers = 1;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-       INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum, fs_info);
-       INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work,
-                 fs_info);
-       INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
-#else
-       INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum);
-       INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work);
-       INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
-#endif
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
@@ -1177,7 +1291,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
 
        mutex_init(&fs_info->trans_mutex);
-       mutex_init(&fs_info->fs_mutex);
+       mutex_init(&fs_info->drop_mutex);
+       mutex_init(&fs_info->alloc_mutex);
+       mutex_init(&fs_info->chunk_mutex);
+       mutex_init(&fs_info->transaction_kthread_mutex);
+       mutex_init(&fs_info->cleaner_mutex);
+       mutex_init(&fs_info->volume_mutex);
+       init_waitqueue_head(&fs_info->transaction_throttle);
+       init_waitqueue_head(&fs_info->transaction_wait);
 
 #if 0
        ret = add_hasher(fs_info, "crc32c");
@@ -1190,30 +1311,55 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        __setup_root(4096, 4096, 4096, 4096, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
-       fs_info->sb_buffer = read_tree_block(tree_root,
-                                            BTRFS_SUPER_INFO_OFFSET,
-                                            4096);
 
-       if (!fs_info->sb_buffer)
+       bh = __bread(fs_devices->latest_bdev,
+                    BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+       if (!bh)
                goto fail_iput;
 
-       read_extent_buffer(fs_info->sb_buffer, &fs_info->super_copy, 0,
-                          sizeof(fs_info->super_copy));
+       memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
+       brelse(bh);
 
-       read_extent_buffer(fs_info->sb_buffer, fs_info->fsid,
-                          (unsigned long)btrfs_super_fsid(fs_info->sb_buffer),
-                          BTRFS_FSID_SIZE);
+       memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
 
        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                goto fail_sb_buffer;
 
-       if (btrfs_super_num_devices(disk_super) != fs_devices->num_devices) {
+       err = btrfs_parse_options(tree_root, options);
+       if (err)
+               goto fail_sb_buffer;
+
+       /*
+        * we need to start all the end_io workers up front because the
+        * queue work function gets called at interrupt time, and so the
+        * worker pools cannot be grown dynamically.
+        */
+       btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
+       btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
+       btrfs_init_workers(&fs_info->fixup_workers, 1);
+       btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+       btrfs_init_workers(&fs_info->endio_write_workers,
+                          fs_info->thread_pool_size);
+       btrfs_start_workers(&fs_info->workers, 1);
+       btrfs_start_workers(&fs_info->submit_workers, 1);
+       btrfs_start_workers(&fs_info->fixup_workers, 1);
+       btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+       btrfs_start_workers(&fs_info->endio_write_workers,
+                           fs_info->thread_pool_size);
+
+       err = -EINVAL;
+       if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
                printk("Btrfs: wanted %llu devices, but found %llu\n",
                       (unsigned long long)btrfs_super_num_devices(disk_super),
-                      (unsigned long long)fs_devices->num_devices);
-               goto fail_sb_buffer;
+                      (unsigned long long)fs_devices->open_devices);
+               if (btrfs_test_opt(tree_root, DEGRADED))
+                       printk("continuing in degraded mode\n");
+               else
+                       goto fail_sb_buffer;
        }
+
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 
        nodesize = btrfs_super_nodesize(disk_super);
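The ordering the comment above insists on matters because end_workqueue_bio() calls btrfs_queue_worker() from bio completion, i.e. interrupt context, where nothing may sleep to spawn a thread. Schematically:

/* at mount time, in process context (sleeping is fine): */
btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);

/* later, from a bio end_io handler in interrupt context, the only
 * legal operation is handing work to the already-running threads: */
btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);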
@@ -1224,7 +1370,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        tree_root->leafsize = leafsize;
        tree_root->sectorsize = sectorsize;
        tree_root->stripesize = stripesize;
-       sb_set_blocksize(sb, sectorsize);
+
+       sb->s_blocksize = sectorsize;
+       sb->s_blocksize_bits = blksize_bits(sectorsize);
 
        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
@@ -1232,10 +1380,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                goto fail_sb_buffer;
        }
 
-       mutex_lock(&fs_info->fs_mutex);
-
+       mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_sys_array(tree_root);
-       BUG_ON(ret);
+       mutex_unlock(&fs_info->chunk_mutex);
+       if (ret) {
+               printk("btrfs: failed to read the system array on %s\n",
+                      sb->s_id);
+               goto fail_sys_array;
+       }
 
        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_chunk_root_level(disk_super));
@@ -1245,23 +1397,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        chunk_root->node = read_tree_block(chunk_root,
                                           btrfs_super_chunk_root(disk_super),
-                                          blocksize);
+                                          blocksize, 0);
        BUG_ON(!chunk_root->node);
 
        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
                 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
                 BTRFS_UUID_SIZE);
 
+       mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
+       mutex_unlock(&fs_info->chunk_mutex);
        BUG_ON(ret);
 
+       btrfs_close_extra_devices(fs_devices);
+
        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_root_level(disk_super));
 
 
        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super),
-                                         blocksize);
+                                         blocksize, 0);
        if (!tree_root->node)
                goto fail_sb_buffer;
 
@@ -1285,25 +1441,42 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->data_alloc_profile = (u64)-1;
        fs_info->metadata_alloc_profile = (u64)-1;
        fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
+       fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
+                                              "btrfs-cleaner");
+       if (IS_ERR(fs_info->cleaner_kthread))
+               goto fail_extent_root;
+
+       fs_info->transaction_kthread = kthread_run(transaction_kthread,
+                                                  tree_root,
+                                                  "btrfs-transaction");
+       if (IS_ERR(fs_info->transaction_kthread))
+               goto fail_cleaner;
+
 
-       mutex_unlock(&fs_info->fs_mutex);
        return tree_root;
 
+fail_cleaner:
+       kthread_stop(fs_info->cleaner_kthread);
 fail_extent_root:
        free_extent_buffer(extent_root->node);
 fail_tree_root:
-       mutex_unlock(&fs_info->fs_mutex);
        free_extent_buffer(tree_root->node);
+fail_sys_array:
 fail_sb_buffer:
-       free_extent_buffer(fs_info->sb_buffer);
-       extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
+       btrfs_stop_workers(&fs_info->fixup_workers);
+       btrfs_stop_workers(&fs_info->workers);
+       btrfs_stop_workers(&fs_info->endio_workers);
+       btrfs_stop_workers(&fs_info->endio_write_workers);
+       btrfs_stop_workers(&fs_info->submit_workers);
 fail_iput:
        iput(fs_info->btree_inode);
 fail:
-       close_all_devices(fs_info);
+       btrfs_close_devices(fs_info->fs_devices);
+       btrfs_mapping_tree_free(&fs_info->mapping_tree);
+
        kfree(extent_root);
        kfree(tree_root);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
 #endif
        kfree(fs_info);
@@ -1322,7 +1495,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
-               set_buffer_write_io_error(bh);
+               /* note, we don't set_buffer_write_io_error because we have
+                * our own ways of dealing with the IO errors
+                */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
@@ -1334,38 +1509,51 @@ int write_all_supers(struct btrfs_root *root)
        struct list_head *cur;
        struct list_head *head = &root->fs_info->fs_devices->devices;
        struct btrfs_device *dev;
-       struct extent_buffer *sb;
+       struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        struct buffer_head *bh;
        int ret;
        int do_barriers;
+       int max_errors;
+       int total_errors = 0;
+       u32 crc;
+       u64 flags;
 
+       max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);
 
-       sb = root->fs_info->sb_buffer;
-       dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
-                                                     dev_item);
+       sb = &root->fs_info->super_for_commit;
+       dev_item = &sb->dev_item;
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
-               btrfs_set_device_type(sb, dev_item, dev->type);
-               btrfs_set_device_id(sb, dev_item, dev->devid);
-               btrfs_set_device_total_bytes(sb, dev_item, dev->total_bytes);
-               btrfs_set_device_bytes_used(sb, dev_item, dev->bytes_used);
-               btrfs_set_device_io_align(sb, dev_item, dev->io_align);
-               btrfs_set_device_io_width(sb, dev_item, dev->io_width);
-               btrfs_set_device_sector_size(sb, dev_item, dev->sector_size);
-               write_extent_buffer(sb, dev->uuid,
-                                   (unsigned long)btrfs_device_uuid(dev_item),
-                                   BTRFS_UUID_SIZE);
-
-               btrfs_set_header_flag(sb, BTRFS_HEADER_FLAG_WRITTEN);
-               csum_tree_block(root, sb, 0);
-
-               bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET /
-                             root->fs_info->sb->s_blocksize,
+               if (!dev->bdev) {
+                       total_errors++;
+                       continue;
+               }
+               if (!dev->in_fs_metadata)
+                       continue;
+
+               btrfs_set_stack_device_type(dev_item, dev->type);
+               btrfs_set_stack_device_id(dev_item, dev->devid);
+               btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
+               btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
+               btrfs_set_stack_device_io_align(dev_item, dev->io_align);
+               btrfs_set_stack_device_io_width(dev_item, dev->io_width);
+               btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
+               memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
+               flags = btrfs_super_flags(sb);
+               btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
+
+               crc = ~(u32)0;
+               crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
+                                     BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
+               btrfs_csum_final(crc, sb->csum);
+
+               bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
                              BTRFS_SUPER_INFO_SIZE);
 
-               read_extent_buffer(sb, bh->b_data, 0, BTRFS_SUPER_INFO_SIZE);
+               memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
                dev->pending_io = bh;
 
                get_bh(bh);
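The checksum computed above covers the whole 4K super except the csum field itself, which is why the data pointer is advanced by BTRFS_CSUM_SIZE and the length shrunk to match. Assuming the btrfs wrappers behave like the kernel's generic crc32c helper (a reasonable reading of the crc32c.h shim in this tree), the computation is equivalent to:

#include <linux/crc32c.h>

u32 crc = ~(u32)0;	/* standard crc32c seed */
crc = crc32c(crc, (char *)sb + BTRFS_CSUM_SIZE,
	     BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
/* btrfs_csum_final() then inverts the running crc and stores it
 * little-endian into sb->csum */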
@@ -1387,11 +1575,22 @@ int write_all_supers(struct btrfs_root *root)
                } else {
                        ret = submit_bh(WRITE, bh);
                }
-               BUG_ON(ret);
+               if (ret)
+                       total_errors++;
        }
+       if (total_errors > max_errors) {
+               printk("btrfs: %d errors while writing supers\n", total_errors);
+               BUG();
+       }
+       total_errors = 0;
 
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
+               if (!dev->bdev)
+                       continue;
+               if (!dev->in_fs_metadata)
+                       continue;
+
                BUG_ON(!dev->pending_io);
                bh = dev->pending_io;
                wait_on_buffer(bh);
@@ -1406,15 +1605,20 @@ int write_all_supers(struct btrfs_root *root)
                                ret = submit_bh(WRITE, bh);
                                BUG_ON(ret);
                                wait_on_buffer(bh);
-                               BUG_ON(!buffer_uptodate(bh));
+                               if (!buffer_uptodate(bh))
+                                       total_errors++;
                        } else {
-                               BUG();
+                               total_errors++;
                        }
 
                }
                dev->pending_io = NULL;
                brelse(bh);
        }
+       if (total_errors > max_errors) {
+               printk("btrfs: %d errors while writing supers\n", total_errors);
+               BUG();
+       }
        return 0;
 }
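The structure of write_all_supers() is a two-phase broadcast with an error budget: submit to every device and count failures, then wait on every device and count again; only when more than num_devices - 1 copies fail is the commit unsafe. In outline (submit_super() and super_ok() are hypothetical stand-ins for the submit_bh/wait_on_buffer sequences above):

int max_errors = btrfs_super_num_devices(&fs_info->super_copy) - 1;
int total_errors = 0;
struct btrfs_device *dev;
struct list_head *cur;

list_for_each(cur, head) {		/* phase 1: fire off all writes */
	dev = list_entry(cur, struct btrfs_device, dev_list);
	if (submit_super(dev))		/* hypothetical */
		total_errors++;
}
if (total_errors > max_errors)
	BUG();				/* no super made it out at all */

total_errors = 0;
list_for_each(cur, head) {		/* phase 2: wait and re-count */
	dev = list_entry(cur, struct btrfs_device, dev_list);
	if (!super_ok(dev))		/* hypothetical */
		total_errors++;
}
if (total_errors > max_errors)
	BUG();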
 
@@ -1424,15 +1628,6 @@ int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
        int ret;
 
        ret = write_all_supers(root);
-#if 0
-       if (!btrfs_test_opt(root, NOBARRIER))
-               blkdev_issue_flush(sb->s_bdev, NULL);
-       set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, super);
-       ret = sync_page_range_nolock(btree_inode, btree_inode->i_mapping,
-                                    super->start, super->len);
-       if (!btrfs_test_opt(root, NOBARRIER))
-               blkdev_issue_flush(sb->s_bdev, NULL);
-#endif
        return ret;
 }
 
@@ -1479,9 +1674,12 @@ int close_ctree(struct btrfs_root *root)
        struct btrfs_fs_info *fs_info = root->fs_info;
 
        fs_info->closing = 1;
-       btrfs_transaction_flush_work(root);
-       mutex_lock(&fs_info->fs_mutex);
-       btrfs_defrag_dirty_roots(root->fs_info);
+       smp_mb();
+
+       kthread_stop(root->fs_info->transaction_kthread);
+       kthread_stop(root->fs_info->cleaner_kthread);
+
+       btrfs_clean_old_snapshots(root);
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
@@ -1489,10 +1687,8 @@ int close_ctree(struct btrfs_root *root)
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
-       write_ctree_super(NULL, root);
-       mutex_unlock(&fs_info->fs_mutex);
 
-       btrfs_transaction_flush_work(root);
+       write_ctree_super(NULL, root);
 
        if (fs_info->delalloc_bytes) {
                printk("btrfs: at unmount delalloc count %Lu\n",
@@ -1510,30 +1706,18 @@ int close_ctree(struct btrfs_root *root)
        if (root->fs_info->dev_root->node)
                free_extent_buffer(root->fs_info->dev_root->node);
 
-       free_extent_buffer(fs_info->sb_buffer);
-
        btrfs_free_block_groups(root->fs_info);
        del_fs_roots(fs_info);
 
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
-       extent_io_tree_empty_lru(&fs_info->free_space_cache);
-       extent_io_tree_empty_lru(&fs_info->block_group_cache);
-       extent_io_tree_empty_lru(&fs_info->pinned_extents);
-       extent_io_tree_empty_lru(&fs_info->pending_del);
-       extent_io_tree_empty_lru(&fs_info->extent_ins);
-       extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
-
-       flush_workqueue(end_io_workqueue);
-       flush_workqueue(async_submit_workqueue);
-
        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
 
-       flush_workqueue(end_io_workqueue);
-       destroy_workqueue(end_io_workqueue);
-
-       flush_workqueue(async_submit_workqueue);
-       destroy_workqueue(async_submit_workqueue);
+       btrfs_stop_workers(&fs_info->fixup_workers);
+       btrfs_stop_workers(&fs_info->workers);
+       btrfs_stop_workers(&fs_info->endio_workers);
+       btrfs_stop_workers(&fs_info->endio_write_workers);
+       btrfs_stop_workers(&fs_info->submit_workers);
 
        iput(fs_info->btree_inode);
 #if 0
@@ -1546,10 +1730,10 @@ int close_ctree(struct btrfs_root *root)
                kfree(hasher);
        }
 #endif
-       close_all_devices(fs_info);
+       btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
 #endif
 
@@ -1560,10 +1744,18 @@ int close_ctree(struct btrfs_root *root)
        return 0;
 }
 
-int btrfs_buffer_uptodate(struct extent_buffer *buf)
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
 {
+       int ret;
        struct inode *btree_inode = buf->first_page->mapping->host;
-       return extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
+
+       ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
+       if (!ret)
+               return ret;
+
+       ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
+                                   parent_transid);
+       return !ret;
 }
 
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
@@ -1579,6 +1771,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
        u64 transid = btrfs_header_generation(buf);
        struct inode *btree_inode = root->fs_info->btree_inode;
 
+       WARN_ON(!btrfs_tree_locked(buf));
        if (transid != root->fs_info->generation) {
                printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
                        (unsigned long long)buf->start,
@@ -1588,83 +1781,35 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
        set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
 }
 
-void btrfs_throttle(struct btrfs_root *root)
-{
-       struct backing_dev_info *bdi;
-
-       bdi = root->fs_info->sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
-       if (root->fs_info->throttles && bdi_write_congested(bdi)) {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
-               congestion_wait(WRITE, HZ/20);
-#else
-               blk_congestion_wait(WRITE, HZ/20);
-#endif
-       }
-}
-
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 {
-       balance_dirty_pages_ratelimited_nr(
-                                  root->fs_info->btree_inode->i_mapping, 1);
-}
-
-void btrfs_set_buffer_defrag(struct extent_buffer *buf)
-{
-       struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-       struct inode *btree_inode = root->fs_info->btree_inode;
-       set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
-                       buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
-}
-
-void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
-{
-       struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-       struct inode *btree_inode = root->fs_info->btree_inode;
-       set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
-                       buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
-                       GFP_NOFS);
-}
-
-int btrfs_buffer_defrag(struct extent_buffer *buf)
-{
-       struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-       struct inode *btree_inode = root->fs_info->btree_inode;
-       return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
-                    buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
-}
-
-int btrfs_buffer_defrag_done(struct extent_buffer *buf)
-{
-       struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-       struct inode *btree_inode = root->fs_info->btree_inode;
-       return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
-                    buf->start, buf->start + buf->len - 1,
-                    EXTENT_DEFRAG_DONE, 0);
-}
+       /*
+        * looks as though older kernels can get into trouble with
+        * this code; they end up stuck in balance_dirty_pages forever
+        */
+       struct extent_io_tree *tree;
+       u64 num_dirty;
+       u64 start = 0;
+       unsigned long thresh = 16 * 1024 * 1024;
+       tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 
-int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
-{
-       struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-       struct inode *btree_inode = root->fs_info->btree_inode;
-       return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
-                    buf->start, buf->start + buf->len - 1,
-                    EXTENT_DEFRAG_DONE, GFP_NOFS);
-}
+       if (current_is_pdflush())
+               return;
 
-int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
-{
-       struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
-       struct inode *btree_inode = root->fs_info->btree_inode;
-       return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
-                    buf->start, buf->start + buf->len - 1,
-                    EXTENT_DEFRAG, GFP_NOFS);
+       num_dirty = count_range_bits(tree, &start, (u64)-1,
+                                    thresh, EXTENT_DIRTY);
+       if (num_dirty > thresh) {
+               balance_dirty_pages_ratelimited_nr(
+                                  root->fs_info->btree_inode->i_mapping, 1);
+       }
+       return;
 }
 
-int btrfs_read_buffer(struct extent_buffer *buf)
+int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 {
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        int ret;
-       ret = btree_read_extent_buffer_pages(root, buf, 0);
+       ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
        }