btrfs: unify the lock/unlock extent variants
[linux-block.git] / fs / btrfs / ordered-data.c
index 1952ac85222c0bb429fda21162f4d7827149ec12..40a364c1117884f1f1dadb0319fa4ed78fab5608 100644 (file)
@@ -524,7 +524,15 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        bool pending;
+       bool freespace_inode;
 
+       /*
+        * If this is a free space inode, the thread has not acquired the
+        * ordered extents lockdep map.
+        */
+       freespace_inode = btrfs_is_free_space_inode(btrfs_inode);
+
+       btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
        /* This is paired with btrfs_add_ordered_extent. */
        spin_lock(&btrfs_inode->lock);
        btrfs_mod_outstanding_extents(btrfs_inode, -1);
@@ -580,6 +588,8 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                }
        }
 
+       btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);
+
        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;
@@ -594,6 +604,8 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
+       if (!freespace_inode)
+               btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
 }
 
 static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
@@ -712,9 +724,16 @@ void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
        u64 start = entry->file_offset;
        u64 end = start + entry->num_bytes - 1;
        struct btrfs_inode *inode = BTRFS_I(entry->inode);
+       bool freespace_inode;
 
        trace_btrfs_ordered_extent_start(inode, entry);
 
+       /*
+        * If this is a free space inode, do not take the ordered extents
+        * lockdep map.
+        */
+       freespace_inode = btrfs_is_free_space_inode(inode);
+
        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
@@ -723,6 +742,8 @@ void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
        if (wait) {
+               if (!freespace_inode)
+                       btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
@@ -1022,7 +1043,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                cachedp = cached_state;
 
        while (1) {
-               lock_extent_bits(&inode->io_tree, start, end, cachedp);
+               lock_extent(&inode->io_tree, start, end, cachedp);
                ordered = btrfs_lookup_ordered_range(inode, start,
                                                     end - start + 1);
                if (!ordered) {
@@ -1035,7 +1056,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
                                refcount_dec(&cache->refs);
                        break;
                }
-               unlock_extent_cached(&inode->io_tree, start, end, cachedp);
+               unlock_extent(&inode->io_tree, start, end, cachedp);
                btrfs_start_ordered_extent(ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }