Btrfs: Delete csum items when freeing extents
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8efc123d222b01b51a8f1c065d863f789b4fb602..c72f4f3b912c099d434d85751d5421bc4c9b56a1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -26,7 +26,8 @@
 #include <linux/buffer_head.h> // for block_sync_page
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
+#include "compat.h"
 #include "crc32c.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -92,9 +93,9 @@ struct async_submit_bio {
  * extents on the btree inode are pretty simple, there's one extent
  * that covers the entire device
  */
-struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
-                                   size_t page_offset, u64 start, u64 len,
-                                   int create)
+static struct extent_map *btree_get_extent(struct inode *inode,
+               struct page *page, size_t page_offset, u64 start, u64 len,
+               int create)
 {
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
@@ -175,7 +176,9 @@ void btrfs_csum_final(u32 crc, char *result)
 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
 {
-       char result[BTRFS_CRC32_SIZE];
+       u16 csum_size =
+               btrfs_super_csum_size(&root->fs_info->super_copy);
+       char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
@@ -185,6 +188,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
+       unsigned long inline_result;
 
        len = buf->len - offset;
        while(len > 0) {
@@ -203,25 +207,37 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                offset += cur_len;
                unmap_extent_buffer(buf, map_token, KM_USER0);
        }
+       if (csum_size > sizeof(inline_result)) {
+               result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
+               if (!result)
+                       return 1;
+       } else {
+               result = (char *)&inline_result;
+       }
+
        btrfs_csum_final(crc, result);
 
        if (verify) {
                /* FIXME, this is not good */
-               if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
+               if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
-                       memcpy(&found, result, BTRFS_CRC32_SIZE);
+                       memcpy(&found, result, csum_size);
 
-                       read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
+                       read_extent_buffer(buf, &val, 0, csum_size);
                        printk("btrfs: %s checksum verify failed on %llu "
                               "wanted %X found %X level %d\n",
                               root->fs_info->sb->s_id,
                               buf->start, val, found, btrfs_header_level(buf));
+                       if (result != (char *)&inline_result)
+                               kfree(result);
                        return 1;
                }
        } else {
-               write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
+               write_extent_buffer(buf, result, 0, csum_size);
        }
+       if (result != (char *)&inline_result)
+               kfree(result);
        return 0;
 }
 
@@ -294,7 +310,7 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror
  * checksum a dirty tree block before IO.  This has extra checks to make
  * sure we only fill in the checksum field in the first page of a multi-page block
  */
-int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
+static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 {
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
@@ -345,7 +361,26 @@ out:
        return 0;
 }
 
-int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
+static int check_tree_block_fsid(struct btrfs_root *root,
+                                struct extent_buffer *eb)
+{
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+       u8 fsid[BTRFS_UUID_SIZE];
+       int ret = 1;
+
+       read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
+                          BTRFS_FSID_SIZE);
+       while (fs_devices) {
+               if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
+                       ret = 0;
+                       break;
+               }
+               fs_devices = fs_devices->seed;
+       }
+       return ret;
+}
+
+static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
 {
        struct extent_io_tree *tree;
@@ -382,9 +417,7 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                ret = -EIO;
                goto err;
        }
-       if (memcmp_extent_buffer(eb, root->fs_info->fsid,
-                                (unsigned long)btrfs_header_fsid(eb),
-                                BTRFS_FSID_SIZE)) {
+       if (check_tree_block_fsid(root, eb)) {
                printk("bad fsid on block %Lu\n", eb->start);
                ret = -EIO;
                goto err;
@@ -412,11 +445,18 @@ static void end_workqueue_bio(struct bio *bio, int err)
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;
-       if (bio->bi_rw & (1 << BIO_RW))
+
+       if (bio->bi_rw & (1 << BIO_RW)) {
                btrfs_queue_worker(&fs_info->endio_write_workers,
                                   &end_io_wq->work);
-       else
-               btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
+       } else {
+               if (end_io_wq->metadata)
+                       btrfs_queue_worker(&fs_info->endio_meta_workers,
+                                          &end_io_wq->work);
+               else
+                       btrfs_queue_worker(&fs_info->endio_workers,
+                                          &end_io_wq->work);
+       }
 }
 
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
@@ -501,7 +541,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        extent_submit_bio_hook_t *submit_bio_done)
 {
        struct async_submit_bio *async;
-       int limit = btrfs_async_submit_limit(fs_info);
 
        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
@@ -521,15 +560,10 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        async->work.flags = 0;
        async->bio_flags = bio_flags;
 
-       while(atomic_read(&fs_info->async_submit_draining) &&
-             atomic_read(&fs_info->nr_async_submits)) {
-               wait_event(fs_info->async_submit_wait,
-                          (atomic_read(&fs_info->nr_async_submits) == 0));
-       }
-
        atomic_inc(&fs_info->nr_async_submits);
        btrfs_queue_worker(&fs_info->workers, &async->work);
-
+#if 0
+       int limit = btrfs_async_submit_limit(fs_info);
        if (atomic_read(&fs_info->nr_async_submits) > limit) {
                wait_event_timeout(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) < limit),
@@ -539,7 +573,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                           (atomic_read(&fs_info->nr_async_bios) < limit),
                           HZ/10);
        }
-
+#endif
        while(atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
@@ -605,7 +639,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                BUG_ON(ret);
 
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
-                                    mirror_num, 1);
+                                    mirror_num, 0);
        }
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
@@ -648,7 +682,7 @@ static int btree_writepages(struct address_space *mapping,
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
 }
 
-int btree_readpage(struct file *file, struct page *page)
+static int btree_readpage(struct file *file, struct page *page)
 {
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -821,7 +855,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u64 objectid)
 {
        root->node = NULL;
-       root->inode = NULL;
        root->commit_root = NULL;
        root->ref_tree = NULL;
        root->sectorsize = sectorsize;
@@ -861,6 +894,12 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->defrag_running = 0;
        root->defrag_level = 0;
        root->root_key.objectid = objectid;
+       root->anon_super.s_root = NULL;
+       root->anon_super.s_dev = 0;
+       INIT_LIST_HEAD(&root->anon_super.s_list);
+       INIT_LIST_HEAD(&root->anon_super.s_instances);
+       init_rwsem(&root->anon_super.s_umount);
+
        return 0;
 }
 
@@ -1067,6 +1106,9 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;
+
+       set_anon_super(&root->anon_super, NULL);
+
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
@@ -1075,10 +1117,12 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                kfree(root);
                return ERR_PTR(ret);
        }
-       ret = btrfs_find_dead_roots(fs_info->tree_root,
-                                   root->root_key.objectid, root);
-       BUG_ON(ret);
-
+       if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+               ret = btrfs_find_dead_roots(fs_info->tree_root,
+                                           root->root_key.objectid, root);
+               BUG_ON(ret);
+               btrfs_orphan_cleanup(root);
+       }
        return root;
 }
 
@@ -1102,7 +1146,7 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                kfree(root);
                return ERR_PTR(ret);
        }
-
+#if 0
        ret = btrfs_sysfs_add_root(root);
        if (ret) {
                free_extent_buffer(root->node);
@@ -1110,6 +1154,7 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                kfree(root);
                return ERR_PTR(ret);
        }
+#endif
        root->in_sysfs = 1;
        return root;
 }
@@ -1139,11 +1184,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
        struct list_head *cur;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;
-
+#if 0
        if ((bdi_bits & (1 << BDI_write_congested)) &&
            btrfs_congested_async(info, 0))
                return 1;
-
+#endif
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (!device->bdev)
@@ -1170,6 +1215,9 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
        info = (struct btrfs_fs_info *)bdi->unplug_io_data;
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
+               if (!device->bdev)
+                       continue;
+
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi->unplug_io_fn) {
                        bdi->unplug_io_fn(bdi, page);
@@ -1177,7 +1225,7 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
        }
 }
 
-void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
+static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
        struct inode *inode;
        struct extent_map_tree *em_tree;
@@ -1186,7 +1234,7 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
        u64 offset;
 
        /* the generic O_DIRECT read code does this */
-       if (!page) {
+       if (1 || !page) {
                __unplug_io_fn(bdi, page);
                return;
        }
@@ -1201,6 +1249,16 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
                return;
 
        inode = mapping->host;
+
+       /*
+        * don't do the expensive searching for a small number of
+        * devices
+        */
+       if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
+               __unplug_io_fn(bdi, page);
+               return;
+       }
+
        offset = page_offset(page);
 
        em_tree = &BTRFS_I(inode)->extent_tree;
@@ -1296,7 +1354,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
         * blocksize <= pagesize, it is basically a noop
         */
        if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
-               btrfs_queue_worker(&fs_info->endio_workers,
+               btrfs_queue_worker(&fs_info->endio_meta_workers,
                                   &end_io_wq->work);
                return;
        }
@@ -1401,9 +1459,13 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        u32 blocksize;
        u32 stripesize;
        u64 generation;
+       u64 features;
+       struct btrfs_key location;
        struct buffer_head *bh;
        struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
+       struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
+                                                GFP_NOFS);
        struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
@@ -1420,7 +1482,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        struct btrfs_super_block *disk_super;
 
        if (!extent_root || !tree_root || !fs_info ||
-           !chunk_root || !dev_root) {
+           !chunk_root || !dev_root || !csum_root) {
                err = -ENOMEM;
                goto fail;
        }
@@ -1437,6 +1499,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
+       fs_info->csum_root = csum_root;
        fs_info->chunk_root = chunk_root;
        fs_info->dev_root = dev_root;
        fs_info->fs_devices = fs_devices;
@@ -1532,8 +1595,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
 
-       bh = __bread(fs_devices->latest_bdev,
-                    BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+       bh = btrfs_read_dev_super(fs_devices->latest_bdev);
        if (!bh)
                goto fail_iput;
 
@@ -1544,11 +1606,33 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
-               goto fail_sb_buffer;
+               goto fail_iput;
 
-       err = btrfs_parse_options(tree_root, options);
-       if (err)
-               goto fail_sb_buffer;
+       ret = btrfs_parse_options(tree_root, options);
+       if (ret) {
+               err = ret;
+               goto fail_iput;
+       }
+
+       features = btrfs_super_incompat_flags(disk_super) &
+               ~BTRFS_FEATURE_INCOMPAT_SUPP;
+       if (features) {
+               printk(KERN_ERR "BTRFS: couldn't mount because of "
+                      "unsupported optional features (%Lx).\n",
+                      features);
+               err = -EINVAL;
+               goto fail_iput;
+       }
+
+       features = btrfs_super_compat_ro_flags(disk_super) &
+               ~BTRFS_FEATURE_COMPAT_RO_SUPP;
+       if (!(sb->s_flags & MS_RDONLY) && features) {
+               printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
+                      "unsupported option features (%Lx).\n",
+                      features);
+               err = -EINVAL;
+               goto fail_iput;
+       }
 
        /*
         * we need to start all the end_io workers up front because the
@@ -1580,6 +1664,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
        btrfs_init_workers(&fs_info->endio_workers, "endio",
                           fs_info->thread_pool_size);
+       btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
+                          fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
                           fs_info->thread_pool_size);
 
@@ -1595,21 +1681,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        btrfs_start_workers(&fs_info->delalloc_workers, 1);
        btrfs_start_workers(&fs_info->fixup_workers, 1);
        btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+       btrfs_start_workers(&fs_info->endio_meta_workers,
+                           fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->endio_write_workers,
                            fs_info->thread_pool_size);
 
-       err = -EINVAL;
-       if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
-               printk("Btrfs: wanted %llu devices, but found %llu\n",
-                      (unsigned long long)btrfs_super_num_devices(disk_super),
-                      (unsigned long long)fs_devices->open_devices);
-               if (btrfs_test_opt(tree_root, DEGRADED))
-                       printk("continuing in degraded mode\n");
-               else {
-                       goto fail_sb_buffer;
-               }
-       }
-
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
                                    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
@@ -1633,7 +1709,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        }
 
        mutex_lock(&fs_info->chunk_mutex);
-       ret = btrfs_read_sys_array(tree_root);
+       ret = btrfs_read_sys_array(tree_root, btrfs_super_bytenr(disk_super));
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk("btrfs: failed to read the system array on %s\n",
@@ -1660,7 +1736,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
        mutex_unlock(&fs_info->chunk_mutex);
-       BUG_ON(ret);
+       if (ret) {
+               printk("btrfs: failed to read chunk tree on %s\n", sb->s_id);
+               goto fail_chunk_root;
+       }
 
        btrfs_close_extra_devices(fs_devices);
 
@@ -1672,7 +1751,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                                          btrfs_super_root(disk_super),
                                          blocksize, generation);
        if (!tree_root->node)
-               goto fail_sb_buffer;
+               goto fail_chunk_root;
 
 
        ret = find_and_setup_root(tree_root, fs_info,
@@ -1688,16 +1767,24 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        if (ret)
                goto fail_extent_root;
 
+       ret = find_and_setup_root(tree_root, fs_info,
+                                 BTRFS_CSUM_TREE_OBJECTID, csum_root);
+       if (ret)
+               goto fail_extent_root;
+
+       csum_root->track_dirty = 1;
+
        btrfs_read_block_groups(extent_root);
 
-       fs_info->generation = btrfs_super_generation(disk_super) + 1;
+       fs_info->generation = generation + 1;
+       fs_info->last_trans_committed = generation;
        fs_info->data_alloc_profile = (u64)-1;
        fs_info->metadata_alloc_profile = (u64)-1;
        fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (!fs_info->cleaner_kthread)
-               goto fail_extent_root;
+               goto fail_csum_root;
 
        fs_info->transaction_kthread = kthread_run(transaction_kthread,
                                                   tree_root,
@@ -1706,9 +1793,13 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                goto fail_cleaner;
 
        if (btrfs_super_log_root(disk_super) != 0) {
-               u32 blocksize;
                u64 bytenr = btrfs_super_log_root(disk_super);
 
+               if (fs_devices->rw_devices == 0) {
+                       printk("Btrfs log replay required on RO media\n");
+                       err = -EIO;
+                       goto fail_trans_kthread;
+               }
                blocksize =
                     btrfs_level_size(tree_root,
                                      btrfs_super_log_root_level(disk_super));
@@ -1724,29 +1815,59 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                                                      generation + 1);
                ret = btrfs_recover_log_trees(log_tree_root);
                BUG_ON(ret);
+
+               if (sb->s_flags & MS_RDONLY) {
+                       ret =  btrfs_commit_super(tree_root);
+                       BUG_ON(ret);
+               }
        }
-       fs_info->last_trans_committed = btrfs_super_generation(disk_super);
 
-       ret = btrfs_cleanup_reloc_trees(tree_root);
-       BUG_ON(ret);
+       if (!(sb->s_flags & MS_RDONLY)) {
+               ret = btrfs_cleanup_reloc_trees(tree_root);
+               BUG_ON(ret);
+       }
+
+       location.objectid = BTRFS_FS_TREE_OBJECTID;
+       location.type = BTRFS_ROOT_ITEM_KEY;
+       location.offset = (u64)-1;
 
+       fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
+       if (!fs_info->fs_root)
+               goto fail_trans_kthread;
        return tree_root;
 
+fail_trans_kthread:
+       kthread_stop(fs_info->transaction_kthread);
 fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);
+
+       /*
+        * make sure we're done with the btree inode before we stop our
+        * kthreads
+        */
+       filemap_write_and_wait(fs_info->btree_inode->i_mapping);
+       invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+
+fail_csum_root:
+       free_extent_buffer(csum_root->node);
 fail_extent_root:
        free_extent_buffer(extent_root->node);
 fail_tree_root:
        free_extent_buffer(tree_root->node);
+fail_chunk_root:
+       free_extent_buffer(chunk_root->node);
 fail_sys_array:
+       free_extent_buffer(dev_root->node);
 fail_sb_buffer:
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
+       btrfs_stop_workers(&fs_info->endio_meta_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
 fail_iput:
+       invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);
 fail:
        btrfs_close_devices(fs_info->fs_devices);
@@ -1758,6 +1879,7 @@ fail:
        kfree(fs_info);
        kfree(chunk_root);
        kfree(dev_root);
+       kfree(csum_root);
        return ERR_PTR(err);
 }
 
@@ -1782,19 +1904,147 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        put_bh(bh);
 }
 
-int write_all_supers(struct btrfs_root *root)
+struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
+{
+       struct buffer_head *bh;
+       struct buffer_head *latest = NULL;
+       struct btrfs_super_block *super;
+       int i;
+       u64 transid = 0;
+       u64 bytenr;
+
+       /* we would like to check all the supers, but that would make
+        * a btrfs mount succeed after a mkfs from a different FS.
+        * So, we need to add a special mount option to scan for
+        * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+        */
+       for (i = 0; i < 1; i++) {
+               bytenr = btrfs_sb_offset(i);
+               if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
+                       break;
+               bh = __bread(bdev, bytenr / 4096, 4096);
+               if (!bh)
+                       continue;
+
+               super = (struct btrfs_super_block *)bh->b_data;
+               if (btrfs_super_bytenr(super) != bytenr ||
+                   strncmp((char *)(&super->magic), BTRFS_MAGIC,
+                           sizeof(super->magic))) {
+                       brelse(bh);
+                       continue;
+               }
+
+               if (!latest || btrfs_super_generation(super) > transid) {
+                       brelse(latest);
+                       latest = bh;
+                       transid = btrfs_super_generation(super);
+               } else {
+                       brelse(bh);
+               }
+       }
+       return latest;
+}
+
+static int write_dev_supers(struct btrfs_device *device,
+                           struct btrfs_super_block *sb,
+                           int do_barriers, int wait, int max_mirrors)
+{
+       struct buffer_head *bh;
+       int i;
+       int ret;
+       int errors = 0;
+       u32 crc;
+       u64 bytenr;
+       int last_barrier = 0;
+
+       if (max_mirrors == 0)
+               max_mirrors = BTRFS_SUPER_MIRROR_MAX;
+
+       /* make sure only the last submit_bh does a barrier */
+       if (do_barriers) {
+               for (i = 0; i < max_mirrors; i++) {
+                       bytenr = btrfs_sb_offset(i);
+                       if (bytenr + BTRFS_SUPER_INFO_SIZE >=
+                           device->total_bytes)
+                               break;
+                       last_barrier = i;
+               }
+       }
+
+       for (i = 0; i < max_mirrors; i++) {
+               bytenr = btrfs_sb_offset(i);
+               if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+                       break;
+
+               if (wait) {
+                       bh = __find_get_block(device->bdev, bytenr / 4096,
+                                             BTRFS_SUPER_INFO_SIZE);
+                       BUG_ON(!bh);
+                       brelse(bh);
+                       wait_on_buffer(bh);
+                       if (buffer_uptodate(bh)) {
+                               brelse(bh);
+                               continue;
+                       }
+               } else {
+                       btrfs_set_super_bytenr(sb, bytenr);
+
+                       crc = ~(u32)0;
+                       crc = btrfs_csum_data(NULL, (char *)sb +
+                                             BTRFS_CSUM_SIZE, crc,
+                                             BTRFS_SUPER_INFO_SIZE -
+                                             BTRFS_CSUM_SIZE);
+                       btrfs_csum_final(crc, sb->csum);
+
+                       bh = __getblk(device->bdev, bytenr / 4096,
+                                     BTRFS_SUPER_INFO_SIZE);
+                       memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
+
+                       set_buffer_uptodate(bh);
+                       get_bh(bh);
+                       lock_buffer(bh);
+                       bh->b_end_io = btrfs_end_buffer_write_sync;
+               }
+
+               if (i == last_barrier && do_barriers && device->barriers) {
+                       ret = submit_bh(WRITE_BARRIER, bh);
+                       if (ret == -EOPNOTSUPP) {
+                               printk("btrfs: disabling barriers on dev %s\n",
+                                      device->name);
+                               set_buffer_uptodate(bh);
+                               device->barriers = 0;
+                               get_bh(bh);
+                               lock_buffer(bh);
+                               ret = submit_bh(WRITE, bh);
+                       }
+               } else {
+                       ret = submit_bh(WRITE, bh);
+               }
+
+               if (!ret && wait) {
+                       wait_on_buffer(bh);
+                       if (!buffer_uptodate(bh))
+                               errors++;
+               } else if (ret) {
+                       errors++;
+               }
+               if (wait)
+                       brelse(bh);
+       }
+       return errors < i ? 0 : -1;
+}
+
+int write_all_supers(struct btrfs_root *root, int max_mirrors)
 {
        struct list_head *cur;
        struct list_head *head = &root->fs_info->fs_devices->devices;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
-       struct buffer_head *bh;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
-       u32 crc;
        u64 flags;
 
        max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
@@ -1808,9 +2058,10 @@ int write_all_supers(struct btrfs_root *root)
                        total_errors++;
                        continue;
                }
-               if (!dev->in_fs_metadata)
+               if (!dev->in_fs_metadata || !dev->writeable)
                        continue;
 
+               btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
@@ -1819,40 +2070,12 @@ int write_all_supers(struct btrfs_root *root)
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
+               memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
+
                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
 
-
-               crc = ~(u32)0;
-               crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
-                                     BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
-               btrfs_csum_final(crc, sb->csum);
-
-               bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
-                             BTRFS_SUPER_INFO_SIZE);
-
-               memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
-               dev->pending_io = bh;
-
-               get_bh(bh);
-               set_buffer_uptodate(bh);
-               lock_buffer(bh);
-               bh->b_end_io = btrfs_end_buffer_write_sync;
-
-               if (do_barriers && dev->barriers) {
-                       ret = submit_bh(WRITE_BARRIER, bh);
-                       if (ret == -EOPNOTSUPP) {
-                               printk("btrfs: disabling barriers on dev %s\n",
-                                      dev->name);
-                               set_buffer_uptodate(bh);
-                               dev->barriers = 0;
-                               get_bh(bh);
-                               lock_buffer(bh);
-                               ret = submit_bh(WRITE, bh);
-                       }
-               } else {
-                       ret = submit_bh(WRITE, bh);
-               }
+               ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
                if (ret)
                        total_errors++;
        }
@@ -1860,38 +2083,18 @@ int write_all_supers(struct btrfs_root *root)
                printk("btrfs: %d errors while writing supers\n", total_errors);
                BUG();
        }
-       total_errors = 0;
 
+       total_errors = 0;
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (!dev->bdev)
                        continue;
-               if (!dev->in_fs_metadata)
+               if (!dev->in_fs_metadata || !dev->writeable)
                        continue;
 
-               BUG_ON(!dev->pending_io);
-               bh = dev->pending_io;
-               wait_on_buffer(bh);
-               if (!buffer_uptodate(dev->pending_io)) {
-                       if (do_barriers && dev->barriers) {
-                               printk("btrfs: disabling barriers on dev %s\n",
-                                      dev->name);
-                               set_buffer_uptodate(bh);
-                               get_bh(bh);
-                               lock_buffer(bh);
-                               dev->barriers = 0;
-                               ret = submit_bh(WRITE, bh);
-                               BUG_ON(ret);
-                               wait_on_buffer(bh);
-                               if (!buffer_uptodate(bh))
-                                       total_errors++;
-                       } else {
-                               total_errors++;
-                       }
-
-               }
-               dev->pending_io = NULL;
-               brelse(bh);
+               ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
+               if (ret)
+                       total_errors++;
        }
        if (total_errors > max_errors) {
                printk("btrfs: %d errors while writing supers\n", total_errors);
@@ -1900,12 +2103,12 @@ int write_all_supers(struct btrfs_root *root)
        return 0;
 }
 
-int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
-                     *root)
+int write_ctree_super(struct btrfs_trans_handle *trans,
+                     struct btrfs_root *root, int max_mirrors)
 {
        int ret;
 
-       ret = write_all_supers(root);
+       ret = write_all_supers(root, max_mirrors);
        return ret;
 }
 
@@ -1913,10 +2116,14 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 {
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
+       if (root->anon_super.s_dev) {
+               down_write(&root->anon_super.s_umount);
+               kill_anon_super(&root->anon_super);
+       }
+#if 0
        if (root->in_sysfs)
                btrfs_sysfs_del_root(root);
-       if (root->inode)
-               iput(root->inode);
+#endif
        if (root->node)
                free_extent_buffer(root->node);
        if (root->commit_root)
@@ -1945,28 +2152,69 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)
        return 0;
 }
 
-int close_ctree(struct btrfs_root *root)
+int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
 {
+       u64 root_objectid = 0;
+       struct btrfs_root *gang[8];
+       int i;
        int ret;
-       struct btrfs_trans_handle *trans;
-       struct btrfs_fs_info *fs_info = root->fs_info;
 
-       fs_info->closing = 1;
-       smp_mb();
+       while (1) {
+               ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+                                            (void **)gang, root_objectid,
+                                            ARRAY_SIZE(gang));
+               if (!ret)
+                       break;
+               for (i = 0; i < ret; i++) {
+                       root_objectid = gang[i]->root_key.objectid;
+                       ret = btrfs_find_dead_roots(fs_info->tree_root,
+                                                   root_objectid, gang[i]);
+                       BUG_ON(ret);
+                       btrfs_orphan_cleanup(gang[i]);
+               }
+               root_objectid++;
+       }
+       return 0;
+}
 
-       kthread_stop(root->fs_info->transaction_kthread);
-       kthread_stop(root->fs_info->cleaner_kthread);
+int btrfs_commit_super(struct btrfs_root *root)
+{
+       struct btrfs_trans_handle *trans;
+       int ret;
 
+       mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_clean_old_snapshots(root);
+       mutex_unlock(&root->fs_info->cleaner_mutex);
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
-       /* run commit again to  drop the original snapshot */
+       BUG_ON(ret);
+       /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
 
-       write_ctree_super(NULL, root);
+       ret = write_ctree_super(NULL, root, 0);
+       return ret;
+}
+
+int close_ctree(struct btrfs_root *root)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       int ret;
+
+       fs_info->closing = 1;
+       smp_mb();
+
+       kthread_stop(root->fs_info->transaction_kthread);
+       kthread_stop(root->fs_info->cleaner_kthread);
+
+       if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+               ret =  btrfs_commit_super(root);
+               if (ret) {
+                       printk("btrfs: commit super returns %d\n", ret);
+               }
+       }
 
        if (fs_info->delalloc_bytes) {
                printk("btrfs: at unmount delalloc count %Lu\n",
@@ -1989,22 +2237,23 @@ int close_ctree(struct btrfs_root *root)
        if (root->fs_info->dev_root->node);
                free_extent_buffer(root->fs_info->dev_root->node);
 
+       if (root->fs_info->csum_root->node)
+               free_extent_buffer(root->fs_info->csum_root->node);
+
        btrfs_free_block_groups(root->fs_info);
-       fs_info->closing = 2;
-       del_fs_roots(fs_info);
 
-       filemap_write_and_wait(fs_info->btree_inode->i_mapping);
+       del_fs_roots(fs_info);
 
-       truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
+       iput(fs_info->btree_inode);
 
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
+       btrfs_stop_workers(&fs_info->endio_meta_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
 
-       iput(fs_info->btree_inode);
 #if 0
        while(!list_empty(&fs_info->hashers)) {
                struct btrfs_hasher *hasher;
@@ -2024,6 +2273,7 @@ int close_ctree(struct btrfs_root *root)
        kfree(fs_info->tree_root);
        kfree(fs_info->chunk_root);
        kfree(fs_info->dev_root);
+       kfree(fs_info->csum_root);
        return 0;
 }
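
The csum_tree_block() hunk above replaces the fixed BTRFS_CRC32_SIZE stack array with a checksum buffer sized by btrfs_super_csum_size(): checksums that fit in a long reuse an on-stack scratch word, anything larger falls back to a heap allocation that must be freed on every exit path. The standalone userspace sketch below is illustrative only and assumes nothing beyond the pattern shown in the hunk; store_csum() and its caller are made-up names, not kernel APIs.

	/*
	 * Illustrative userspace sketch (not part of the patch, not kernel API):
	 * pick an on-stack scratch word for small checksums and fall back to a
	 * heap buffer for larger checksum sizes, mirroring csum_tree_block().
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int store_csum(const unsigned char *csum, size_t csum_size)
	{
		unsigned long inline_result;            /* scratch for small checksums */
		unsigned char *result;

		if (csum_size > sizeof(inline_result)) {
			result = calloc(1, csum_size);  /* larger checksum types */
			if (!result)
				return 1;
		} else {
			result = (unsigned char *)&inline_result;
		}

		memcpy(result, csum, csum_size);
		printf("stored %zu-byte checksum, first byte 0x%02x\n",
		       csum_size, (unsigned int)result[0]);

		/* free only if we really allocated */
		if (result != (unsigned char *)&inline_result)
			free(result);
		return 0;
	}

	int main(void)
	{
		const unsigned char crc32c[4] = { 0xde, 0xad, 0xbe, 0xef };

		return store_csum(crc32c, sizeof(crc32c));
	}

In the kernel hunk the same comparison against &inline_result decides whether kfree() is needed, both on the verify-failure return and on the normal return.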