Btrfs: Wait for IO on the block device inodes of newly added devices
[linux-2.6-block.git] fs/btrfs/disk-io.c
index dffb8dabd533b848cbaaf33028a1d1898aca189f..45b4f7285275dd839c81cf7bf4ac0b97b39a9b4a 100644
@@ -460,6 +460,13 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        async->submit_bio_hook = submit_bio_hook;
        async->work.func = run_one_async_submit;
        async->work.flags = 0;
+
+       while (atomic_read(&fs_info->async_submit_draining) &&
+             atomic_read(&fs_info->nr_async_submits)) {
+               wait_event(fs_info->async_submit_wait,
+                          (atomic_read(&fs_info->nr_async_submits) == 0));
+       }
+
        atomic_inc(&fs_info->nr_async_submits);
        btrfs_queue_worker(&fs_info->workers, &async->work);
 
@@ -495,11 +502,8 @@ static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       u64 offset;
        int ret;
 
-       offset = bio->bi_sector << 9;
-
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
@@ -556,7 +560,7 @@ static int btree_writepages(struct address_space *mapping,
        if (wbc->sync_mode == WB_SYNC_NONE) {
                u64 num_dirty;
                u64 start = 0;
-               unsigned long thresh = 8 * 1024 * 1024;
+               unsigned long thresh = 32 * 1024 * 1024;
 
                if (wbc->for_kupdate)
                        return 0;
@@ -690,7 +694,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
        return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
-                                     buf->start + buf->len - 1, WB_SYNC_NONE);
+                                     buf->start + buf->len - 1, WB_SYNC_ALL);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
@@ -1360,6 +1364,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        INIT_LIST_HEAD(&fs_info->space_info);
        btrfs_mapping_init(&fs_info->mapping_tree);
        atomic_set(&fs_info->nr_async_submits, 0);
+       atomic_set(&fs_info->async_submit_draining, 0);
        atomic_set(&fs_info->nr_async_bios, 0);
        atomic_set(&fs_info->throttles, 0);
        atomic_set(&fs_info->throttle_gen, 0);
@@ -1406,6 +1411,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                             fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->do_barriers = 1;
 
+       extent_io_tree_init(&fs_info->reloc_mapping_tree,
+                           fs_info->btree_inode->i_mapping, GFP_NOFS);
+       INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
+       btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
+       btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);
+
        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
@@ -1419,6 +1430,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        mutex_init(&fs_info->transaction_kthread_mutex);
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);
+       mutex_init(&fs_info->tree_reloc_mutex);
        init_waitqueue_head(&fs_info->transaction_throttle);
        init_waitqueue_head(&fs_info->transaction_wait);
        init_waitqueue_head(&fs_info->async_submit_wait);
@@ -1625,6 +1637,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                ret = btrfs_recover_log_trees(log_tree_root);
                BUG_ON(ret);
        }
+
+       ret = btrfs_cleanup_reloc_trees(tree_root);
+       BUG_ON(ret);
+
        fs_info->last_trans_committed = btrfs_super_generation(disk_super);
        return tree_root;
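
The new loop in btrfs_wq_submit_bio() only covers the submitter side: while
async_submit_draining is set and async submits are still queued, new callers
block on async_submit_wait. The drain-side callers are not in this file; the
following is only an illustrative sketch of how such a caller would pair with
that loop, assuming the flag is raised with atomic_inc() and dropped with
atomic_dec(), and that the async workers wake async_submit_wait as
nr_async_submits falls. The helper name is made up for the example.

	/*
	 * Sketch only, not part of this diff: drain all pending async
	 * submits before doing work that must not race with them.
	 */
	static void example_drain_async_submits(struct btrfs_fs_info *fs_info)
	{
		/* make new submitters in btrfs_wq_submit_bio() block */
		atomic_inc(&fs_info->async_submit_draining);

		/* wait for everything already queued to be submitted */
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->nr_async_submits) == 0);

		/* let blocked submitters continue */
		atomic_dec(&fs_info->async_submit_draining);
	}

Blocking new submitters first keeps nr_async_submits from being refilled while
the drain is waiting, so the wait_event() condition can actually reach zero.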