Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index fa67ba51516e9c1b5d7b23efaddc3068116fda08..17be3dedacbab1c47084270153ed059719984470 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -22,6 +22,7 @@
 #include <linux/writeback.h>
 #include <linux/pagemap.h>
 #include <linux/blkdev.h>
+#include <linux/uuid.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -38,7 +39,6 @@ void put_transaction(struct btrfs_transaction *transaction)
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
-               WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
@@ -100,8 +100,8 @@ loop:
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                cur_trans = fs_info->running_transaction;
                goto loop;
-       } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
-               spin_unlock(&root->fs_info->trans_lock);
+       } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+               spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }
@@ -126,7 +126,6 @@ loop:
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
-       cur_trans->delayed_refs.seq = 1;
 
        /*
         * although the tree mod log is per file system and not per transaction,
@@ -145,10 +144,8 @@ loop:
        }
        atomic_set(&fs_info->tree_mod_seq, 0);
 
-       init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
-       INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
@@ -299,6 +296,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        int ret;
+       u64 qgroup_reserved = 0;
 
        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);
@@ -317,6 +315,14 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
+               if (root->fs_info->quota_enabled &&
+                   is_fstree(root->root_key.objectid)) {
+                       qgroup_reserved = num_items * root->leafsize;
+                       ret = btrfs_qgroup_reserve(root, qgroup_reserved);
+                       if (ret)
+                               return ERR_PTR(ret);
+               }
+
                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
@@ -352,11 +358,16 @@ again:
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
+       h->root = root;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
+       h->adding_csums = 0;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;
+       h->qgroup_reserved = qgroup_reserved;
+       h->delayed_ref_elem.seq = 0;
+       INIT_LIST_HEAD(&h->qgroup_ref_list);
 
        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -476,7 +487,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
-       struct btrfs_block_rsv *rsv = trans->block_rsv;
        int updates;
        int err;
 
@@ -484,12 +494,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;
 
-       /*
-        * We need to do this in case we're deleting csums so the global block
-        * rsv get's used instead of the csum block rsv.
-        */
-       trans->block_rsv = NULL;
-
        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
@@ -498,8 +502,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                        return err;
        }
 
-       trans->block_rsv = rsv;
-
        return should_end_transaction(trans, root);
 }
 
@@ -516,8 +518,24 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                return 0;
        }
 
+       /*
+        * do the qgroup accounting as early as possible
+        */
+       err = btrfs_delayed_refs_qgroup_accounting(trans, info);
+
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
+       /*
+        * the same root has to be passed to start_transaction and
+        * end_transaction. Subvolume quota depends on this.
+        */
+       WARN_ON(trans->root != root);
+
+       if (trans->qgroup_reserved) {
+               btrfs_qgroup_free(root, trans->qgroup_reserved);
+               trans->qgroup_reserved = 0;
+       }
+
        while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
@@ -530,6 +548,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                }
                count++;
        }
+       btrfs_trans_release_metadata(trans, root);
+       trans->block_rsv = NULL;
 
        sb_end_intwrite(root->fs_info->sb);
 
@@ -572,6 +592,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
            root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                err = -EIO;
        }
+       assert_qgroups_uptodate(trans);
 
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
@@ -790,6 +811,13 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
        ret = btrfs_run_dev_stats(trans, root->fs_info);
        BUG_ON(ret);
 
+       ret = btrfs_run_qgroups(trans, root->fs_info);
+       BUG_ON(ret);
+
+       /* run_qgroups might have added some more refs */
+       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+       BUG_ON(ret);
+
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
@@ -931,11 +959,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
+       struct timespec cur_time = CURRENT_TIME;
        int ret;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;
+       uuid_le new_uuid;
 
        rsv = trans->block_rsv;
 
@@ -962,6 +992,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                }
        }
 
+       ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
+                                  objectid, pending->inherit);
+       kfree(pending->inherit);
+       if (ret) {
+               pending->error = ret;
+               goto fail;
+       }
+
        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;
@@ -1021,6 +1059,20 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);
 
+       btrfs_set_root_generation_v2(new_root_item,
+                       trans->transid);
+       uuid_le_gen(&new_uuid);
+       memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
+       memcpy(new_root_item->parent_uuid, root->root_item.uuid,
+                       BTRFS_UUID_SIZE);
+       new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
+       new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec);
+       btrfs_set_root_otransid(new_root_item, trans->transid);
+       memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
+       memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
+       btrfs_set_root_stransid(new_root_item, 0);
+       btrfs_set_root_rtransid(new_root_item, 0);
+
        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
@@ -1274,9 +1326,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_run_ordered_operations(root, 0);
 
-       btrfs_trans_release_metadata(trans, root);
-       trans->block_rsv = NULL;
-
        if (cur_trans->aborted)
                goto cleanup_transaction;
 
@@ -1287,6 +1336,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        if (ret)
                goto cleanup_transaction;
 
+       btrfs_trans_release_metadata(trans, root);
+       trans->block_rsv = NULL;
+
        cur_trans = trans->transaction;
 
        /*
@@ -1335,7 +1387,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                spin_unlock(&root->fs_info->trans_lock);
        }
 
-       if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
+       if (!btrfs_test_opt(root, SSD) &&
+           (now < cur_trans->start_time || now - cur_trans->start_time < 1))
                should_grow = 1;
 
        do {
@@ -1356,6 +1409,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                if (ret)
                        goto cleanup_transaction;
 
+               /*
+                * running the delayed items may have added new refs. account
+                * them now so that they hinder processing of more delayed refs
+                * as little as possible.
+                */
+               btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+
                /*
                 * rename don't use btrfs_join_transaction, so, once we
                 * set the transaction to blocked above, we aren't going
@@ -1468,6 +1528,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                            root->fs_info->chunk_root->node);
        switch_commit_root(root->fs_info->chunk_root);
 
+       assert_qgroups_uptodate(trans);
        update_super_roots(root);
 
        if (!root->fs_info->log_root_recovering) {
@@ -1539,6 +1600,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        return ret;
 
 cleanup_transaction:
+       btrfs_trans_release_metadata(trans, root);
+       trans->block_rsv = NULL;
        btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
 //     WARN_ON(1);
        if (current->journal_info == trans)