diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 3ce9829a6936da8967062b5bf6ca02f62b748c9a..d79b821ed1c780e2f2bee74b6a2cab73dddae2fb 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/sched/mm.h>
 #include <linux/crc32.h>
+#include <linux/task_io_accounting_ops.h>
 
 #include "zonefs.h"
 
@@ -78,10 +79,9 @@ static int zonefs_readpage(struct file *unused, struct page *page)
        return iomap_readpage(page, &zonefs_iomap_ops);
 }
 
-static int zonefs_readpages(struct file *unused, struct address_space *mapping,
-                           struct list_head *pages, unsigned int nr_pages)
+static void zonefs_readahead(struct readahead_control *rac)
 {
-       return iomap_readpages(mapping, pages, nr_pages, &zonefs_iomap_ops);
+       iomap_readahead(rac, &zonefs_iomap_ops);
 }
 
 /*
@@ -128,7 +128,7 @@ static int zonefs_writepages(struct address_space *mapping,
 
 static const struct address_space_operations zonefs_file_aops = {
        .readpage               = zonefs_readpage,
-       .readpages              = zonefs_readpages,
+       .readahead              = zonefs_readahead,
        .writepage              = zonefs_writepage,
        .writepages             = zonefs_writepages,
        .set_page_dirty         = iomap_set_page_dirty,
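
The two hunks above convert zonefs from the ->readpages address_space operation to ->readahead: instead of a list of pages still to be added to the page cache, the filesystem now receives a struct readahead_control describing the readahead window, and iomap_readahead() manages the pages itself. As a minimal sketch of what such a hook can do with that window, assuming only the readahead_*() helpers from linux/pagemap.h and iomap_readahead(); example_readahead and example_iomap_ops are made-up names, not part of this patch:

#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/printk.h>

/* Placeholder for the filesystem's real iomap operations. */
extern const struct iomap_ops example_iomap_ops;

static void example_readahead(struct readahead_control *rac)
{
	pgoff_t index = readahead_index(rac);	/* first page index of the window */
	unsigned int nr = readahead_count(rac);	/* number of pages in the window */

	pr_debug("readahead: %u pages starting at index %lu\n", nr, index);

	/* iomap walks the window and issues the reads; no page list to manage. */
	iomap_readahead(rac, &example_iomap_ops);
}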
@@ -478,7 +478,7 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
        if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
                ret = file_write_and_wait_range(file, start, end);
        if (!ret)
-               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 
        if (ret)
                zonefs_io_error(inode, true);
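
The fsync hunk above only tracks a block-layer interface change: blkdev_issue_flush() no longer takes an error-sector output pointer, so the device cache flush is requested with just the block device and the allocation mask. A sketch of that pattern, assuming the two-argument blkdev_issue_flush() used in this tree; example_fsync is a hypothetical helper, not code from the patch:

#include <linux/fs.h>
#include <linux/blkdev.h>

static int example_fsync(struct file *file, loff_t start, loff_t end)
{
	struct inode *inode = file_inode(file);
	int ret;

	/* Write back and wait on the dirty page-cache range first... */
	ret = file_write_and_wait_range(file, start, end);
	/* ...then flush the device's volatile write cache. */
	if (!ret)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
	return ret;
}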
@@ -596,6 +596,61 @@ static const struct iomap_dio_ops zonefs_write_dio_ops = {
        .end_io                 = zonefs_file_write_dio_end_io,
 };
 
+static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+{
+       struct inode *inode = file_inode(iocb->ki_filp);
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       struct block_device *bdev = inode->i_sb->s_bdev;
+       unsigned int max;
+       struct bio *bio;
+       ssize_t size;
+       int nr_pages;
+       ssize_t ret;
+
+       nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
+       if (!nr_pages)
+               return 0;
+
+       max = queue_max_zone_append_sectors(bdev_get_queue(bdev));
+       max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
+       iov_iter_truncate(from, max);
+
+       bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set);
+       if (!bio)
+               return -ENOMEM;
+
+       bio_set_dev(bio, bdev);
+       bio->bi_iter.bi_sector = zi->i_zsector;
+       bio->bi_write_hint = iocb->ki_hint;
+       bio->bi_ioprio = iocb->ki_ioprio;
+       bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+       if (iocb->ki_flags & IOCB_DSYNC)
+               bio->bi_opf |= REQ_FUA;
+
+       ret = bio_iov_iter_get_pages(bio, from);
+       if (unlikely(ret)) {
+               bio_io_error(bio);
+               return ret;
+       }
+       size = bio->bi_iter.bi_size;
+       task_io_account_write(size);
+
+       if (iocb->ki_flags & IOCB_HIPRI)
+               bio_set_polled(bio, iocb);
+
+       ret = submit_bio_wait(bio);
+
+       bio_put(bio);
+
+       zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+       if (ret >= 0) {
+               iocb->ki_pos += size;
+               return size;
+       }
+
+       return ret;
+}
+
 /*
  * Handle direct writes. For sequential zone files, this is the only possible
  * write path. For these files, check that the user is issuing writes
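
zonefs_file_dio_append() above is the core of this change: a synchronous direct write to a sequential zone file is issued as a single REQ_OP_ZONE_APPEND bio. The write is capped to queue_max_zone_append_sectors(), rounded down to the filesystem block size, and the drive places the data at the zone's current write pointer itself, so the host no longer has to deliver writes to an exact LBA in order. On completion of a zone-append bio the block layer stores the sector actually written back in bi_sector; zonefs does not need that value here (a sequential file's data lands at the old write pointer by construction), but a caller that does could recover it as in the sketch below. example_zone_append is a hypothetical helper built only on block-layer calls also used in the patch:

/*
 * Hypothetical helper (illustration only): submit an already-populated bio
 * as a zone append and report where the device chose to write it.
 */
static int example_zone_append(struct block_device *bdev, sector_t zone_start,
			       struct bio *bio, sector_t *written)
{
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC;
	/* For zone append, bi_sector selects the zone, not the final LBA. */
	bio->bi_iter.bi_sector = zone_start;

	ret = submit_bio_wait(bio);
	if (ret)
		return ret;

	/* The block layer reports the sector actually written back here. */
	*written = bio->bi_iter.bi_sector;
	return 0;
}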
@@ -611,6 +666,8 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file_inode(iocb->ki_filp);
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
        struct super_block *sb = inode->i_sb;
+       bool sync = is_sync_kiocb(iocb);
+       bool append = false;
        size_t count;
        ssize_t ret;
 
@@ -619,7 +676,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
         * as this can cause write reordering (e.g. the first aio gets EAGAIN
         * on the inode lock but the second goes through but is now unaligned).
         */
-       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) &&
+       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
            (iocb->ki_flags & IOCB_NOWAIT))
                return -EOPNOTSUPP;
 
@@ -643,16 +700,22 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
        }
 
        /* Enforce sequential writes (append only) in sequential zones */
-       mutex_lock(&zi->i_truncate_mutex);
-       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && iocb->ki_pos != zi->i_wpoffset) {
+       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
+               mutex_lock(&zi->i_truncate_mutex);
+               if (iocb->ki_pos != zi->i_wpoffset) {
+                       mutex_unlock(&zi->i_truncate_mutex);
+                       ret = -EINVAL;
+                       goto inode_unlock;
+               }
                mutex_unlock(&zi->i_truncate_mutex);
-               ret = -EINVAL;
-               goto inode_unlock;
+               append = sync;
        }
-       mutex_unlock(&zi->i_truncate_mutex);
 
-       ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
-                          &zonefs_write_dio_ops, is_sync_kiocb(iocb));
+       if (append)
+               ret = zonefs_file_dio_append(iocb, from);
+       else
+               ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
+                                  &zonefs_write_dio_ops, sync);
        if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
            (ret > 0 || ret == -EIOCBQUEUED)) {
                if (ret > 0)
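
The final hunk wires the helper into zonefs_file_dio_write(): the write-pointer check still happens under i_truncate_mutex, but for sequential zone files append = sync, so the zone-append path is taken only for synchronous kiocbs (zonefs_file_dio_append() blocks in submit_bio_wait()), while asynchronous direct writes continue through iomap_dio_rw(). Stripped of the locking, the selection reduces to roughly the following sketch, reusing the identifiers from the hunk above:

/* Sequential zone file, synchronous kiocb: one blocking zone-append bio. */
if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && is_sync_kiocb(iocb))
	ret = zonefs_file_dio_append(iocb, from);
else
	/* Conventional zone files and async writes: regular iomap direct I/O. */
	ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
			   &zonefs_write_dio_ops, is_sync_kiocb(iocb));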