f2fs: avoid reverse IO order for NODE and DATA
[linux-block.git] / fs / f2fs / segment.c
index 540669d6978e69f7b1bcf27f479ef961e1eabde0..7b58bfbd84a340ea00c9bf47a1150cbf97477222 100644 (file)
@@ -223,9 +223,11 @@ static int __revoke_inmem_pages(struct inode *inode,
                        f2fs_put_dnode(&dn);
                }
 next:
-               ClearPageUptodate(page);
+               /* we don't need to invalidate this in the successful status */
+               if (drop || recover)
+                       ClearPageUptodate(page);
                set_page_private(page, 0);
-               ClearPageUptodate(page);
+               ClearPagePrivate(page);
                f2fs_put_page(page, 1);
 
                list_del(&cur->list);
@@ -239,6 +241,8 @@ void drop_inmem_pages(struct inode *inode)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
 
+       clear_inode_flag(inode, FI_ATOMIC_FILE);
+
        mutex_lock(&fi->inmem_lock);
        __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
        mutex_unlock(&fi->inmem_lock);
@@ -341,6 +345,11 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
        if (!need)
                return;
+
+       /* balance_fs_bg may be pending; run it here if cached nats are excessive */
+       if (excess_cached_nats(sbi))
+               f2fs_balance_fs_bg(sbi);
+
        /*
         * We should do GC or end up with checkpoint, if there are so many dirty
         * dir/node pages without enough free segments.
@@ -429,24 +438,28 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
        if (test_opt(sbi, NOBARRIER))
                return 0;
 
-       if (!test_opt(sbi, FLUSH_MERGE)) {
+       if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
                struct bio *bio = f2fs_bio_alloc(0);
                int ret;
 
+               atomic_inc(&fcc->submit_flush);
                bio->bi_bdev = sbi->sb->s_bdev;
                ret = submit_bio_wait(WRITE_FLUSH, bio);
+               atomic_dec(&fcc->submit_flush);
                bio_put(bio);
                return ret;
        }
 
        init_completion(&cmd.wait);
 
+       atomic_inc(&fcc->submit_flush);
        llist_add(&cmd.llnode, &fcc->issue_list);
 
        if (!fcc->dispatch_list)
                wake_up(&fcc->flush_wait_queue);
 
        wait_for_completion(&cmd.wait);
+       atomic_dec(&fcc->submit_flush);
 
        return cmd.ret;
 }
@@ -460,6 +473,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
        fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
        if (!fcc)
                return -ENOMEM;
+       atomic_set(&fcc->submit_flush, 0);
        init_waitqueue_head(&fcc->flush_wait_queue);
        init_llist_head(&fcc->issue_list);
        SM_I(sbi)->cmd_control_info = fcc;
@@ -1385,11 +1399,17 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 {
        int type = __get_segment_type(fio->page, fio->type);
 
+       if (fio->type == NODE || fio->type == DATA)
+               mutex_lock(&fio->sbi->wio_mutex[fio->type]);
+
        allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
                                        &fio->new_blkaddr, sum, type);
 
        /* writeout dirty page into bdev */
        f2fs_submit_page_mbio(fio);
+
+       if (fio->type == NODE || fio->type == DATA)
+               mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
 }
 
 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)