Merge branch 'for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mason...
[linux-2.6-block.git] fs/btrfs/inode.c
index 3b8856e182ae7b7ee0d6ee4c32454cfc9bffdc3a..5f06eb1f43843055c0373daeb9ad98648865150f 100644 (file)
@@ -66,6 +66,13 @@ struct btrfs_iget_args {
        struct btrfs_root *root;
 };
 
+struct btrfs_dio_data {
+       u64 outstanding_extents;
+       u64 reserve;
+       u64 unsubmitted_oe_range_start;
+       u64 unsubmitted_oe_range_end;
+};
+
 static const struct inode_operations btrfs_dir_inode_operations;
 static const struct inode_operations btrfs_symlink_inode_operations;
 static const struct inode_operations btrfs_dir_ro_inode_operations;
@@ -74,17 +81,16 @@ static const struct inode_operations btrfs_file_inode_operations;
 static const struct address_space_operations btrfs_aops;
 static const struct address_space_operations btrfs_symlink_aops;
 static const struct file_operations btrfs_dir_file_operations;
-static struct extent_io_ops btrfs_extent_io_ops;
+static const struct extent_io_ops btrfs_extent_io_ops;
 
 static struct kmem_cache *btrfs_inode_cachep;
-static struct kmem_cache *btrfs_delalloc_work_cachep;
 struct kmem_cache *btrfs_trans_handle_cachep;
 struct kmem_cache *btrfs_transaction_cachep;
 struct kmem_cache *btrfs_path_cachep;
 struct kmem_cache *btrfs_free_space_cachep;
 
 #define S_SHIFT 12
-static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
@@ -414,15 +420,15 @@ static noinline void compress_file_range(struct inode *inode,
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
-       unsigned long max_compressed = 128 * 1024;
-       unsigned long max_uncompressed = 128 * 1024;
+       unsigned long max_compressed = SZ_128K;
+       unsigned long max_uncompressed = SZ_128K;
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
        int redirty = 0;
 
        /* if this is a small write inside eof, kick off a defrag */
-       if ((end - start + 1) < 16 * 1024 &&
+       if ((end - start + 1) < SZ_16K &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);
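
For reference, the SZ_* names replacing the open-coded sizes throughout this
patch come from include/linux/sizes.h, which spells common power-of-two sizes
as hex constants. The ones used here (quoted from memory, so worth verifying
against the header):

#define SZ_16K                          0x00004000
#define SZ_64K                          0x00010000
#define SZ_128K                         0x00020000
#define SZ_512K                         0x00080000
#define SZ_1M                           0x00100000
#define SZ_32M                          0x02000000
#define SZ_256M                         0x10000000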
 
@@ -430,7 +436,7 @@ static noinline void compress_file_range(struct inode *inode,
 again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
-       nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
+       nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE);
 
        /*
         * we don't want to send crud past the end of i_size through
@@ -944,7 +950,7 @@ static noinline int cow_file_range(struct inode *inode,
        disk_num_bytes = num_bytes;
 
        /* if this is a small write inside eof, kick off defrag */
-       if (num_bytes < 64 * 1024 &&
+       if (num_bytes < SZ_64K &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);
 
@@ -1107,7 +1113,7 @@ static noinline void async_cow_submit(struct btrfs_work *work)
         * atomic_sub_return implies a barrier for waitqueue_active
         */
        if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
-           5 * 1024 * 1024 &&
+           5 * SZ_1M &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);
 
@@ -1132,7 +1138,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
-       int limit = 10 * 1024 * 1024;
+       int limit = 10 * SZ_1M;
 
        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
@@ -1148,7 +1154,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                    !btrfs_test_opt(root, FORCE_COMPRESS))
                        cur_end = end;
                else
-                       cur_end = min(end, start + 512 * 1024 - 1);
+                       cur_end = min(end, start + SZ_512K - 1);
 
                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);
@@ -1989,7 +1995,7 @@ again:
        page_start = page_offset(page);
        page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
 
-       lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
+       lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
                         &cached_state);
 
        /* already ordered? We're done */
@@ -2482,7 +2488,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
        lock_start = backref->file_pos;
        lock_end = backref->file_pos + backref->num_bytes - 1;
        lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
-                        0, &cached);
+                        &cached);
 
        ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
        if (ordered) {
@@ -2874,7 +2880,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 
        lock_extent_bits(io_tree, ordered_extent->file_offset,
                         ordered_extent->file_offset + ordered_extent->len - 1,
-                        0, &cached_state);
+                        &cached_state);
 
        ret = test_range_bit(io_tree, ordered_extent->file_offset,
                        ordered_extent->file_offset + ordered_extent->len - 1,
@@ -3106,56 +3112,46 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                      start, (size_t)(end - start + 1));
 }
 
-struct delayed_iput {
-       struct list_head list;
-       struct inode *inode;
-};
-
-/* JDM: If this is fs-wide, why can't we add a pointer to
- * btrfs_inode instead and avoid the allocation? */
 void btrfs_add_delayed_iput(struct inode *inode)
 {
        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
-       struct delayed_iput *delayed;
+       struct btrfs_inode *binode = BTRFS_I(inode);
 
        if (atomic_add_unless(&inode->i_count, -1, 1))
                return;
 
-       delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
-       delayed->inode = inode;
-
        spin_lock(&fs_info->delayed_iput_lock);
-       list_add_tail(&delayed->list, &fs_info->delayed_iputs);
+       if (binode->delayed_iput_count == 0) {
+               ASSERT(list_empty(&binode->delayed_iput));
+               list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
+       } else {
+               binode->delayed_iput_count++;
+       }
        spin_unlock(&fs_info->delayed_iput_lock);
 }
 
 void btrfs_run_delayed_iputs(struct btrfs_root *root)
 {
-       LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;
-       struct delayed_iput *delayed;
-       int empty;
-
-       spin_lock(&fs_info->delayed_iput_lock);
-       empty = list_empty(&fs_info->delayed_iputs);
-       spin_unlock(&fs_info->delayed_iput_lock);
-       if (empty)
-               return;
-
-       down_read(&fs_info->delayed_iput_sem);
 
        spin_lock(&fs_info->delayed_iput_lock);
-       list_splice_init(&fs_info->delayed_iputs, &list);
-       spin_unlock(&fs_info->delayed_iput_lock);
-
-       while (!list_empty(&list)) {
-               delayed = list_entry(list.next, struct delayed_iput, list);
-               list_del(&delayed->list);
-               iput(delayed->inode);
-               kfree(delayed);
+       while (!list_empty(&fs_info->delayed_iputs)) {
+               struct btrfs_inode *inode;
+
+               inode = list_first_entry(&fs_info->delayed_iputs,
+                               struct btrfs_inode, delayed_iput);
+               if (inode->delayed_iput_count) {
+                       inode->delayed_iput_count--;
+                       list_move_tail(&inode->delayed_iput,
+                                       &fs_info->delayed_iputs);
+               } else {
+                       list_del_init(&inode->delayed_iput);
+               }
+               spin_unlock(&fs_info->delayed_iput_lock);
+               iput(&inode->vfs_inode);
+               spin_lock(&fs_info->delayed_iput_lock);
        }
-
-       up_read(&root->fs_info->delayed_iput_sem);
+       spin_unlock(&fs_info->delayed_iput_lock);
 }
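
The rework above drops the GFP_NOFS | __GFP_NOFAIL allocation by embedding the
list node and a pending counter in btrfs_inode itself (which also answers the
old JDM comment), so queuing a delayed iput cannot fail and the drain loop can
drop the lock around each iput(). A minimal userspace sketch of the same
pattern, with hypothetical names and a pthread mutex standing in for
delayed_iput_lock:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Tiny circular doubly-linked list, same shape as the kernel's list_head. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for btrfs_inode: the list node and counter live in the object. */
struct object {
        struct list_head deferred;      /* like binode->delayed_iput */
        unsigned long deferred_count;   /* like delayed_iput_count */
        const char *name;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct list_head pending = { &pending, &pending };

/* Queue a deferred release; no allocation, so this cannot fail. */
static void add_deferred(struct object *o)
{
        pthread_mutex_lock(&lock);
        if (list_empty(&o->deferred))
                list_add_tail(&o->deferred, &pending);
        else
                o->deferred_count++;    /* already queued: remember the extra */
        pthread_mutex_unlock(&lock);
}

/* Drain, dropping the lock around each release the way the patched
 * btrfs_run_delayed_iputs() does around iput(). */
static void run_deferred(void (*release)(struct object *))
{
        pthread_mutex_lock(&lock);
        while (!list_empty(&pending)) {
                struct object *o = container_of(pending.next,
                                                struct object, deferred);
                if (o->deferred_count) {
                        o->deferred_count--;
                        list_del_init(&o->deferred);
                        list_add_tail(&o->deferred, &pending); /* move to tail */
                } else {
                        list_del_init(&o->deferred);
                }
                pthread_mutex_unlock(&lock);
                release(o);             /* may sleep or take other locks */
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
}

static void show(struct object *o) { printf("released %s\n", o->name); }

int main(void)
{
        struct object a = { .deferred_count = 0, .name = "a" };

        list_init(&a.deferred);
        add_deferred(&a);
        add_deferred(&a);       /* second request only bumps the counter */
        run_deferred(show);     /* prints "released a" twice */
        return 0;
}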
 
 /*
@@ -3351,7 +3347,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                ret = -ENOMEM;
                goto out;
        }
-       path->reada = -1;
+       path->reada = READA_BACK;
 
        key.objectid = BTRFS_ORPHAN_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
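
For reference, READA_BACK and READA_FORWARD replace the bare -1 and 1 values
previously stored in path->reada. If I recall the ctree.h change correctly,
the backing enum is simply:

enum { READA_NONE = 0, READA_BACK, READA_FORWARD };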
@@ -4318,7 +4314,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
-       path->reada = -1;
+       path->reada = READA_BACK;
 
        /*
         * We want to drop from the next block forward in case this new size is
@@ -4349,7 +4345,7 @@ search_again:
         * up a huge file in a single leaf.  Most of the time that
         * bytes_deleted is > 0, it will be huge by the time we get here
         */
-       if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+       if (be_nice && bytes_deleted > SZ_32M) {
                if (btrfs_should_end_transaction(trans, root)) {
                        err = -EAGAIN;
                        goto error;
@@ -4592,7 +4588,7 @@ error:
 
        btrfs_free_path(path);
 
-       if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
+       if (be_nice && bytes_deleted > SZ_32M) {
                unsigned long updates = trans->delayed_ref_updates;
                if (updates) {
                        trans->delayed_ref_updates = 0;
@@ -4669,7 +4665,7 @@ again:
        }
        wait_on_page_writeback(page);
 
-       lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+       lock_extent_bits(io_tree, page_start, page_end, &cached_state);
        set_page_extent_mapped(page);
 
        ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -4800,7 +4796,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
        while (1) {
                struct btrfs_ordered_extent *ordered;
 
-               lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
+               lock_extent_bits(io_tree, hole_start, block_end - 1,
                                 &cached_state);
                ordered = btrfs_lookup_ordered_range(inode, hole_start,
                                                     block_end - hole_start);
@@ -4876,26 +4872,6 @@ next:
        return err;
 }
 
-static int wait_snapshoting_atomic_t(atomic_t *a)
-{
-       schedule();
-       return 0;
-}
-
-static void wait_for_snapshot_creation(struct btrfs_root *root)
-{
-       while (true) {
-               int ret;
-
-               ret = btrfs_start_write_no_snapshoting(root);
-               if (ret)
-                       break;
-               wait_on_atomic_t(&root->will_be_snapshoted,
-                                wait_snapshoting_atomic_t,
-                                TASK_UNINTERRUPTIBLE);
-       }
-}
-
 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -4927,7 +4903,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
                 * truncation, it must capture all writes that happened before
                 * this truncation.
                 */
-               wait_for_snapshot_creation(root);
+               btrfs_wait_for_snapshot_creation(root);
                ret = btrfs_cont_expand(inode, oldsize, newsize);
                if (ret) {
                        btrfs_end_write_no_snapshoting(root);
@@ -5112,7 +5088,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
                end = state->end;
                spin_unlock(&io_tree->lock);
 
-               lock_extent_bits(io_tree, start, end, 0, &cached_state);
+               lock_extent_bits(io_tree, start, end, &cached_state);
 
                /*
                 * If still has DELALLOC flag, the extent didn't reach disk,
@@ -5305,7 +5281,6 @@ void btrfs_evict_inode(struct inode *inode)
 no_delete:
        btrfs_remove_delayed_node(inode);
        clear_inode(inode);
-       return;
 }
 
 /*
@@ -5754,7 +5729,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
        if (!path)
                return -ENOMEM;
 
-       path->reada = 1;
+       path->reada = READA_FORWARD;
 
        if (key_type == BTRFS_DIR_INDEX_KEY) {
                INIT_LIST_HEAD(&ins_list);
@@ -6482,7 +6457,7 @@ out_unlock_inode:
 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                      struct dentry *dentry)
 {
-       struct btrfs_trans_handle *trans;
+       struct btrfs_trans_handle *trans = NULL;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = d_inode(old_dentry);
        u64 index;
@@ -6508,6 +6483,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
+               trans = NULL;
                goto fail;
        }
 
@@ -6541,9 +6517,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
 
-       btrfs_end_transaction(trans, root);
        btrfs_balance_delayed_items(root);
 fail:
+       if (trans)
+               btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -6688,7 +6665,7 @@ static int merge_extent_mapping(struct extent_map_tree *em_tree,
 }
 
 static noinline int uncompress_inline(struct btrfs_path *path,
-                                     struct inode *inode, struct page *page,
+                                     struct page *page,
                                      size_t pg_offset, u64 extent_offset,
                                      struct btrfs_file_extent_item *item)
 {
@@ -6785,7 +6762,7 @@ again:
                 * Chances are we'll be called again, so go ahead and do
                 * readahead
                 */
-               path->reada = 1;
+               path->reada = READA_FORWARD;
        }
 
        ret = btrfs_lookup_file_extent(trans, root, path,
@@ -6884,8 +6861,7 @@ next:
                if (create == 0 && !PageUptodate(page)) {
                        if (btrfs_file_extent_compression(leaf, item) !=
                            BTRFS_COMPRESS_NONE) {
-                               ret = uncompress_inline(path, inode, page,
-                                                       pg_offset,
+                               ret = uncompress_inline(path, page, pg_offset,
                                                        extent_offset, item);
                                if (ret) {
                                        err = ret;
@@ -7140,21 +7116,41 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        if (ret)
                return ERR_PTR(ret);
 
-       em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
-                             ins.offset, ins.offset, ins.offset, 0);
-       if (IS_ERR(em)) {
-               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
-               return em;
-       }
-
+       /*
+        * Create the ordered extent before the extent map. This is to avoid
+        * races with the fast fsync path that would lead to it logging file
+        * extent items that point to disk extents that were not yet written to.
+        * The fast fsync path collects ordered extents into a local list and
+        * then collects all the new extent maps, so we must create the ordered
+        * extent first and make sure the fast fsync path collects any new
+        * ordered extents after collecting new extent maps as well.
+        * The fsync path simply cannot rely on inode_dio_wait() because it
+        * causes deadlocks with AIO.
+        */
        ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
                                           ins.offset, ins.offset, 0);
        if (ret) {
                btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
-               free_extent_map(em);
                return ERR_PTR(ret);
        }
 
+       em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
+                             ins.offset, ins.offset, ins.offset, 0);
+       if (IS_ERR(em)) {
+               struct btrfs_ordered_extent *oe;
+
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+               oe = btrfs_lookup_ordered_extent(inode, start);
+               ASSERT(oe);
+               if (WARN_ON(!oe))
+                       return em;
+               set_bit(BTRFS_ORDERED_IOERR, &oe->flags);
+               set_bit(BTRFS_ORDERED_IO_DONE, &oe->flags);
+               btrfs_remove_ordered_extent(inode, oe);
+               /* Once for our lookup and once for the ordered extents tree. */
+               btrfs_put_ordered_extent(oe);
+               btrfs_put_ordered_extent(oe);
+       }
        return em;
 }
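
The ordering argument in the comment above is easier to see as an
interleaving. A schematic of the race being closed (not real btrfs calls; the
two "collect" steps stand for what the fast fsync path does):

    writer (old order: em first)      fast fsync
    ----------------------------      -------------------------------------
    create extent map
                                      collect extent maps   -> sees new em
                                      collect ordered exts  -> misses the oe
    create ordered extent
    ... data writes complete later ...

Here fsync logs a file extent item for data that may not be on disk yet.
Creating the ordered extent first means any extent map fsync collects already
has its ordered extent visible to the later collection step.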
 
@@ -7381,7 +7377,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 
        while (1) {
                lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-                                0, cached_state);
+                                cached_state);
                /*
                 * We're concerned with the entire range that we're going to be
                 * doing DIO to, so we need to make sure there's no ordered
@@ -7409,25 +7405,21 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                } else {
-                       /* Screw you mmap */
-                       ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
-                       if (ret)
-                               break;
-                       ret = filemap_fdatawait_range(inode->i_mapping,
-                                                     lockstart,
-                                                     lockend);
-                       if (ret)
-                               break;
-
                        /*
-                        * If we found a page that couldn't be invalidated just
-                        * fall back to buffered.
+                        * We could trigger writeback for this range (and wait
+                        * for it to complete) and then invalidate the pages for
+                        * this range (through invalidate_inode_pages2_range()),
+                        * but that can lead us to a deadlock with a concurrent
+                        * call to readpages() (a buffered read or a defrag call
+                        * triggering a readahead) on a page lock, due to an
+                        * ordered dio extent we created earlier for which no
+                        * bio has been submitted yet (hence it cannot
+                        * complete), which makes readpages() wait for that
+                        * ordered extent to complete while holding a lock on
+                        * that page.
                         */
-                       ret = invalidate_inode_pages2_range(inode->i_mapping,
-                                       lockstart >> PAGE_CACHE_SHIFT,
-                                       lockend >> PAGE_CACHE_SHIFT);
-                       if (ret)
-                               break;
+                       ret = -ENOTBLK;
+                       break;
                }
 
                cond_resched();
@@ -7483,11 +7475,6 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
        return em;
 }
 
-struct btrfs_dio_data {
-       u64 outstanding_extents;
-       u64 reserve;
-};
-
 static void adjust_dio_outstanding_extents(struct inode *inode,
                                           struct btrfs_dio_data *dio_data,
                                           const u64 len)
@@ -7671,6 +7658,7 @@ unlock:
                btrfs_free_reserved_data_space(inode, start, len);
                WARN_ON(dio_data->reserve < len);
                dio_data->reserve -= len;
+               dio_data->unsubmitted_oe_range_end = start + len;
                current->journal_info = dio_data;
        }
 
@@ -7993,22 +7981,22 @@ static void btrfs_endio_direct_read(struct bio *bio)
        bio_put(bio);
 }
 
-static void btrfs_endio_direct_write(struct bio *bio)
+static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
+                                                   const u64 offset,
+                                                   const u64 bytes,
+                                                   const int uptodate)
 {
-       struct btrfs_dio_private *dip = bio->bi_private;
-       struct inode *inode = dip->inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ordered_extent *ordered = NULL;
-       u64 ordered_offset = dip->logical_offset;
-       u64 ordered_bytes = dip->bytes;
-       struct bio *dio_bio;
+       u64 ordered_offset = offset;
+       u64 ordered_bytes = bytes;
        int ret;
 
 again:
        ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                   &ordered_offset,
                                                   ordered_bytes,
-                                                  !bio->bi_error);
+                                                  uptodate);
        if (!ret)
                goto out_test;
 
@@ -8021,13 +8009,22 @@ out_test:
         * our bio might span multiple ordered extents.  If we haven't
         * completed the accounting for the whole dio, go back and try again
         */
-       if (ordered_offset < dip->logical_offset + dip->bytes) {
-               ordered_bytes = dip->logical_offset + dip->bytes -
-                       ordered_offset;
+       if (ordered_offset < offset + bytes) {
+               ordered_bytes = offset + bytes - ordered_offset;
                ordered = NULL;
                goto again;
        }
-       dio_bio = dip->dio_bio;
+}
+
+static void btrfs_endio_direct_write(struct bio *bio)
+{
+       struct btrfs_dio_private *dip = bio->bi_private;
+       struct bio *dio_bio = dip->dio_bio;
+
+       btrfs_endio_direct_write_update_ordered(dip->inode,
+                                               dip->logical_offset,
+                                               dip->bytes,
+                                               !bio->bi_error);
 
        kfree(dip);
 
@@ -8335,6 +8332,21 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
                dip->subio_endio = btrfs_subio_endio_read;
        }
 
+       /*
+        * Reset the range for unsubmitted ordered extents (to a zero-length
+        * range) even if we fail to submit a bio, because in that case we do
+        * the corresponding error handling below and it must not be done a
+        * second time by btrfs_direct_IO().
+        */
+       if (write) {
+               struct btrfs_dio_data *dio_data = current->journal_info;
+
+               dio_data->unsubmitted_oe_range_end = dip->logical_offset +
+                       dip->bytes;
+               dio_data->unsubmitted_oe_range_start =
+                       dio_data->unsubmitted_oe_range_end;
+       }
+
        ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
        if (!ret)
                return;
@@ -8363,24 +8375,15 @@ free_ordered:
                dip = NULL;
                io_bio = NULL;
        } else {
-               if (write) {
-                       struct btrfs_ordered_extent *ordered;
-
-                       ordered = btrfs_lookup_ordered_extent(inode,
-                                                             file_offset);
-                       set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
-                       /*
-                        * Decrements our ref on the ordered extent and removes
-                        * the ordered extent from the inode's ordered tree,
-                        * doing all the proper resource cleanup such as for the
-                        * reserved space and waking up any waiters for this
-                        * ordered extent (through btrfs_remove_ordered_extent).
-                        */
-                       btrfs_finish_ordered_io(ordered);
-               } else {
+               if (write)
+                       btrfs_endio_direct_write_update_ordered(inode,
+                                               file_offset,
+                                               dio_bio->bi_iter.bi_size,
+                                               0);
+               else
                        unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
                              file_offset + dio_bio->bi_iter.bi_size - 1);
-               }
+
                dio_bio->bi_error = -EIO;
                /*
                 * Releases and cleans up our dio_bio, no need to bio_put()
@@ -8464,7 +8467,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                 * not unlock the i_mutex in this case.
                 */
                if (offset + count <= inode->i_size) {
-                       mutex_unlock(&inode->i_mutex);
+                       inode_unlock(inode);
                        relock = true;
                }
                ret = btrfs_delalloc_reserve_space(inode, offset, count);
@@ -8480,6 +8483,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                 * originally calculated.  Abuse current->journal_info for this.
                 */
                dio_data.reserve = round_up(count, root->sectorsize);
+               dio_data.unsubmitted_oe_range_start = (u64)offset;
+               dio_data.unsubmitted_oe_range_end = (u64)offset;
                current->journal_info = &dio_data;
        } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
                                     &BTRFS_I(inode)->runtime_flags)) {
@@ -8498,6 +8503,19 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                        if (dio_data.reserve)
                                btrfs_delalloc_release_space(inode, offset,
                                                             dio_data.reserve);
+                       /*
+                        * On error we might have left some ordered extents
+                        * without submitting corresponding bios for them, so
+                        * clean them up to avoid other tasks getting them
+                        * and waiting forever for them to complete.
+                        */
+                       if (dio_data.unsubmitted_oe_range_start <
+                           dio_data.unsubmitted_oe_range_end)
+                               btrfs_endio_direct_write_update_ordered(inode,
+                                       dio_data.unsubmitted_oe_range_start,
+                                       dio_data.unsubmitted_oe_range_end -
+                                       dio_data.unsubmitted_oe_range_start,
+                                       0);
                } else if (ret >= 0 && (size_t)ret < count)
                        btrfs_delalloc_release_space(inode, offset,
                                                     count - (size_t)ret);
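
The new unsubmitted_oe_range_{start,end} fields track, as a half-open range of
file offsets, the ordered extents this dio created but never submitted bios
for: the get_blocks path advances the end as it creates them,
btrfs_submit_direct() collapses the range once bios own the extents, and the
error path above fails whatever remains. A small userspace model of that
bookkeeping (hypothetical names, plain C):

#include <stdint.h>
#include <stdio.h>

/* Half-open [start, end) window of offsets covered by an ordered extent
 * that has no bio submitted for it yet. */
struct dio_window {
        uint64_t start;
        uint64_t end;
};

/* get_blocks path: an ordered extent now covers [end, end + len). */
static void created_ordered_extent(struct dio_window *w, uint64_t len)
{
        w->end += len;
}

/* submit path: bios now own everything created so far; the window becomes
 * empty and the endio handler is responsible for completing those extents. */
static void bios_submitted(struct dio_window *w)
{
        w->start = w->end;
}

/* error path: anything left in the window never got a bio, so it must be
 * completed as failed here or waiters would block on it forever. */
static void cleanup_unsubmitted(const struct dio_window *w)
{
        if (w->start < w->end)
                printf("fail ordered extents in [%llu, %llu)\n",
                       (unsigned long long)w->start,
                       (unsigned long long)w->end);
}

int main(void)
{
        struct dio_window w = { 4096, 4096 };   /* empty at dio start */

        created_ordered_extent(&w, 8192);       /* get_blocks created one */
        cleanup_unsubmitted(&w);                /* error before any submit */

        bios_submitted(&w);                     /* success path: window empties */
        cleanup_unsubmitted(&w);                /* nothing left to fail */
        return 0;
}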
@@ -8506,7 +8524,7 @@ out:
        if (wakeup)
                inode_dio_end(inode);
        if (relock)
-               mutex_lock(&inode->i_mutex);
+               inode_lock(inode);
 
        return ret;
 }
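
The inode_lock() and inode_unlock() calls above are the then-new VFS wrappers
replacing open-coded i_mutex handling. Around v4.5 they are, if I remember the
include/linux/fs.h definitions correctly, just:

static inline void inode_lock(struct inode *inode)
{
        mutex_lock(&inode->i_mutex);
}

static inline void inode_unlock(struct inode *inode)
{
        mutex_unlock(&inode->i_mutex);
}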
@@ -8535,15 +8553,28 @@ int btrfs_readpage(struct file *file, struct page *page)
 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
 {
        struct extent_io_tree *tree;
-
+       struct inode *inode = page->mapping->host;
+       int ret;
 
        if (current->flags & PF_MEMALLOC) {
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }
+
+       /*
+        * If we are under memory pressure we will call this directly from the
+        * VM, so we need to make sure we have the inode referenced for the
+        * ordered extent.  If not, just return as if we didn't do anything.
+        */
+       if (!igrab(inode)) {
+               redirty_page_for_writepage(wbc, page);
+               return AOP_WRITEPAGE_ACTIVATE;
+       }
        tree = &BTRFS_I(page->mapping->host)->io_tree;
-       return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+       ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+       btrfs_add_delayed_iput(inode);
+       return ret;
 }
 
 static int btrfs_writepages(struct address_space *mapping,
@@ -8615,7 +8646,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
        }
 
        if (!inode_evicting)
-               lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
+               lock_extent_bits(tree, page_start, page_end, &cached_state);
        ordered = btrfs_lookup_ordered_extent(inode, page_start);
        if (ordered) {
                /*
@@ -8653,7 +8684,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
                btrfs_put_ordered_extent(ordered);
                if (!inode_evicting) {
                        cached_state = NULL;
-                       lock_extent_bits(tree, page_start, page_end, 0,
+                       lock_extent_bits(tree, page_start, page_end,
                                         &cached_state);
                }
        }
@@ -8751,7 +8782,7 @@ again:
        }
        wait_on_page_writeback(page);
 
-       lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
+       lock_extent_bits(io_tree, page_start, page_end, &cached_state);
        set_page_extent_mapped(page);
 
        /*
@@ -9025,6 +9056,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->dir_index = 0;
        ei->last_unlink_trans = 0;
        ei->last_log_commit = 0;
+       ei->delayed_iput_count = 0;
 
        spin_lock_init(&ei->lock);
        ei->outstanding_extents = 0;
@@ -9049,6 +9081,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        mutex_init(&ei->delalloc_mutex);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
        INIT_LIST_HEAD(&ei->delalloc_inodes);
+       INIT_LIST_HEAD(&ei->delayed_iput);
        RB_CLEAR_NODE(&ei->rb_node);
 
        return inode;
@@ -9153,15 +9186,14 @@ void btrfs_destroy_cachep(void)
                kmem_cache_destroy(btrfs_path_cachep);
        if (btrfs_free_space_cachep)
                kmem_cache_destroy(btrfs_free_space_cachep);
-       if (btrfs_delalloc_work_cachep)
-               kmem_cache_destroy(btrfs_delalloc_work_cachep);
 }
 
 int btrfs_init_cachep(void)
 {
        btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
                        sizeof(struct btrfs_inode), 0,
-                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
+                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
+                       init_once);
        if (!btrfs_inode_cachep)
                goto fail;
 
@@ -9189,13 +9221,6 @@ int btrfs_init_cachep(void)
        if (!btrfs_free_space_cachep)
                goto fail;
 
-       btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
-                       sizeof(struct btrfs_delalloc_work), 0,
-                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
-                       NULL);
-       if (!btrfs_delalloc_work_cachep)
-               goto fail;
-
        return 0;
 fail:
        btrfs_destroy_cachep();
@@ -9419,14 +9444,10 @@ static void btrfs_run_delalloc_work(struct btrfs_work *work)
        delalloc_work = container_of(work, struct btrfs_delalloc_work,
                                     work);
        inode = delalloc_work->inode;
-       if (delalloc_work->wait) {
-               btrfs_wait_ordered_range(inode, 0, (u64)-1);
-       } else {
+       filemap_flush(inode->i_mapping);
+       if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                               &BTRFS_I(inode)->runtime_flags))
                filemap_flush(inode->i_mapping);
-               if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-                            &BTRFS_I(inode)->runtime_flags))
-                       filemap_flush(inode->i_mapping);
-       }
 
        if (delalloc_work->delay_iput)
                btrfs_add_delayed_iput(inode);
@@ -9436,18 +9457,17 @@ static void btrfs_run_delalloc_work(struct btrfs_work *work)
 }
 
 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
-                                                   int wait, int delay_iput)
+                                                   int delay_iput)
 {
        struct btrfs_delalloc_work *work;
 
-       work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
+       work = kmalloc(sizeof(*work), GFP_NOFS);
        if (!work)
                return NULL;
 
        init_completion(&work->completion);
        INIT_LIST_HEAD(&work->list);
        work->inode = inode;
-       work->wait = wait;
        work->delay_iput = delay_iput;
        WARN_ON_ONCE(!inode);
        btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
@@ -9459,7 +9479,7 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
 {
        wait_for_completion(&work->completion);
-       kmem_cache_free(btrfs_delalloc_work_cachep, work);
+       kfree(work);
 }
 
 /*
@@ -9495,7 +9515,7 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
                }
                spin_unlock(&root->delalloc_lock);
 
-               work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
+               work = btrfs_alloc_delalloc_work(inode, delay_iput);
                if (!work) {
                        if (delay_iput)
                                btrfs_add_delayed_iput(inode);
@@ -9637,9 +9657,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        /*
         * 2 items for inode item and ref
         * 2 items for dir items
+        * 1 item for updating parent inode item
+        * 1 item for the inline extent item
         * 1 item for xattr if selinux is on
         */
-       trans = btrfs_start_transaction(root, 5);
+       trans = btrfs_start_transaction(root, 7);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
@@ -9670,10 +9692,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (err)
                goto out_unlock_inode;
 
-       err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
-       if (err)
-               goto out_unlock_inode;
-
        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
@@ -9711,6 +9729,13 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        inode_set_bytes(inode, name_len);
        btrfs_i_size_write(inode, name_len);
        err = btrfs_update_inode(trans, root, inode);
+       /*
+        * Last step, add directory indexes for our symlink inode. This is done
+        * last to avoid extra cleanup of these indexes if an error happens
+        * elsewhere above.
+        */
+       if (!err)
+               err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err) {
                drop_inode = 1;
                goto out_unlock_inode;
@@ -9761,7 +9786,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                        }
                }
 
-               cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
+               cur_bytes = min_t(u64, num_bytes, SZ_256M);
                cur_bytes = max(cur_bytes, min_size);
                /*
                 * If we are severely fragmented we could end up with really
@@ -10025,7 +10050,7 @@ static const struct file_operations btrfs_dir_file_operations = {
        .fsync          = btrfs_sync_file,
 };
 
-static struct extent_io_ops btrfs_extent_io_ops = {
+static const struct extent_io_ops btrfs_extent_io_ops = {
        .fill_delalloc = run_delalloc_range,
        .submit_bio_hook = btrfs_submit_bio_hook,
        .merge_bio_hook = btrfs_merge_bio_hook,