Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 17 Apr 2015 03:27:56 +0000 (23:27 -0400)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 17 Apr 2015 03:27:56 +0000 (23:27 -0400)
Pull third hunk of vfs changes from Al Viro:
 "This contains the ->direct_IO() changes from Omar + saner
  generic_write_checks() + dealing with fcntl()/{read,write}() races
  (mirroring O_APPEND/O_DIRECT into iocb->ki_flags so that, instead of
  repeatedly looking at ->f_flags, which can be changed by fcntl(2), code
  checks ->ki_flags - which cannot) + infrastructure bits for dhowells'
  d_inode annotations + Christoph's switch of /dev/loop to
  vfs_iter_write()"

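For context, the converted ->write_iter() instances in this series (ceph,
btrfs, ext4, cifs, 9p, ...) end up following roughly the shape sketched
below.  The flags are snapshotted from file->f_flags when the iocb is set
up (see the iocb_flags() call added in the fs/aio.c hunk), so checking
iocb->ki_flags is immune to a concurrent fcntl(2).  This is a simplified
composite sketch, not code from any single commit; foo_direct_write() and
foo_buffered_write() are hypothetical placeholders:

static ssize_t foo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        mutex_lock(&inode->i_mutex);
        /*
         * generic_write_checks() now takes the iocb and the iter: it
         * handles O_APPEND via IOCB_APPEND, truncates the iter to the
         * permitted size and returns the remaining byte count (<= 0
         * means "nothing to do" or an error).
         */
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /* fcntl()-proof: test the flags mirrored into the iocb. */
        if (iocb->ki_flags & IOCB_DIRECT)
                ret = foo_direct_write(iocb, from);
        else
                ret = foo_buffered_write(iocb, from);
out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}
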
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (30 commits)
  block: loop: switch to VFS ITER_BVEC
  configfs: Fix inconsistent use of file_inode() vs file->f_path.dentry->d_inode
  VFS: Make pathwalk use d_is_reg() rather than S_ISREG()
  VFS: Fix up debugfs to use d_is_dir() in place of S_ISDIR()
  VFS: Combine inode checks with d_is_negative() and d_is_positive() in pathwalk
  NFS: Don't use d_inode as a variable name
  VFS: Impose ordering on accesses of d_inode and d_flags
  VFS: Add owner-filesystem positive/negative dentry checks
  nfs: generic_write_checks() shouldn't be done on swapout...
  ocfs2: use __generic_file_write_iter()
  mirror O_APPEND and O_DIRECT into iocb->ki_flags
  switch generic_write_checks() to iocb and iter
  ocfs2: move generic_write_checks() before the alignment checks
  ocfs2_file_write_iter: stop messing with ppos
  udf_file_write_iter: reorder and simplify
  fuse: ->direct_IO() doesn't need generic_write_checks()
  ext4_file_write_iter: move generic_write_checks() up
  xfs_file_aio_write_checks: switch to iocb/iov_iter
  generic_write_checks(): drop isblk argument
  blkdev_write_iter: expand generic_file_checks() call in there
  ...

55 files changed:
Documentation/filesystems/Locking
Documentation/filesystems/vfs.txt
drivers/block/loop.c
drivers/staging/lustre/lustre/llite/rw26.c
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/affs/file.c
fs/aio.c
fs/block_dev.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/ceph/addr.c
fs/ceph/file.c
fs/cifs/file.c
fs/configfs/dir.c
fs/dax.c
fs/dcache.c
fs/debugfs/inode.c
fs/direct-io.c
fs/exofs/inode.c
fs/ext2/inode.c
fs/ext3/inode.c
fs/ext4/ext4.h
fs/ext4/file.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/f2fs/data.c
fs/fat/inode.c
fs/fuse/file.c
fs/gfs2/aops.c
fs/gfs2/file.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/jfs/inode.c
fs/namei.c
fs/ncpfs/file.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/read.c
fs/nilfs2/inode.c
fs/ntfs/file.c
fs/ocfs2/aops.c
fs/ocfs2/file.c
fs/read_write.c
fs/reiserfs/inode.c
fs/udf/file.c
fs/udf/inode.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_file.c
include/linux/dcache.h
include/linux/fs.h
include/linux/nfs_fs.h
include/linux/uio.h
mm/filemap.c
mm/page_io.c

index 7c3f187d48bf3cebab238d99ab40d30ff29a9bdc..0a926e2ba3ab68ffd541f5a619cd673e59826821 100644 (file)
@@ -196,7 +196,7 @@ prototypes:
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+       int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
        int (*launder_page)(struct page *);
        int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
index 207cdca68bedf118554bd0addf5517546c190e17..5d833b32bbcd1046de40a15fee169ed462d274fc 100644 (file)
@@ -590,7 +590,7 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+       ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
        /* migrate the contents of a page to the specified target */
        int (*migratepage) (struct page *, struct page *);
        int (*launder_page) (struct page *);
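
As the prototype change above shows, ->direct_IO() no longer receives an
'rw' argument; implementations derive the direction from the iterator via
iov_iter_rw().  A minimal instance under the new signature might look like
the sketch below (modelled on the affs/ext2 conversions later in this diff;
foo_get_block and foo_write_failed are hypothetical placeholders):

static ssize_t foo_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                             loff_t offset)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;

        /* The direction comes from the iterator, not a separate flag. */
        ret = blockdev_direct_IO(iocb, inode, iter, offset, foo_get_block);
        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                foo_write_failed(inode->i_mapping, offset + count);
        return ret;
}
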
index c4fd1e45ce1e82a8f303aacda953539dba99b3be..ae3fcb4199e9b7d85d2475d40ab4f209258a1cc5 100644 (file)
@@ -88,28 +88,6 @@ static int part_shift;
 
 static struct workqueue_struct *loop_wq;
 
-/*
- * Transfer functions
- */
-static int transfer_none(struct loop_device *lo, int cmd,
-                        struct page *raw_page, unsigned raw_off,
-                        struct page *loop_page, unsigned loop_off,
-                        int size, sector_t real_block)
-{
-       char *raw_buf = kmap_atomic(raw_page) + raw_off;
-       char *loop_buf = kmap_atomic(loop_page) + loop_off;
-
-       if (cmd == READ)
-               memcpy(loop_buf, raw_buf, size);
-       else
-               memcpy(raw_buf, loop_buf, size);
-
-       kunmap_atomic(loop_buf);
-       kunmap_atomic(raw_buf);
-       cond_resched();
-       return 0;
-}
-
 static int transfer_xor(struct loop_device *lo, int cmd,
                        struct page *raw_page, unsigned raw_off,
                        struct page *loop_page, unsigned loop_off,
@@ -148,14 +126,13 @@ static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
 
 static struct loop_func_table none_funcs = {
        .number = LO_CRYPT_NONE,
-       .transfer = transfer_none,
-};     
+}; 
 
 static struct loop_func_table xor_funcs = {
        .number = LO_CRYPT_XOR,
        .transfer = transfer_xor,
        .init = xor_init
-};     
+}; 
 
 /* xfer_funcs[0] is special - its release function is never called */
 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
@@ -215,207 +192,169 @@ lo_do_transfer(struct loop_device *lo, int cmd,
               struct page *lpage, unsigned loffs,
               int size, sector_t rblock)
 {
-       if (unlikely(!lo->transfer))
+       int ret;
+
+       ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
+       if (likely(!ret))
                return 0;
 
-       return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
+       printk_ratelimited(KERN_ERR
+               "loop: Transfer error at byte offset %llu, length %i.\n",
+               (unsigned long long)rblock << 9, size);
+       return ret;
 }
 
-/**
- * __do_lo_send_write - helper for writing data to a loop device
- *
- * This helper just factors out common code between do_lo_send_direct_write()
- * and do_lo_send_write().
- */
-static int __do_lo_send_write(struct file *file,
-               u8 *buf, const int len, loff_t pos)
+static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
 {
-       struct kvec kvec = {.iov_base = buf, .iov_len = len};
-       struct iov_iter from;
+       struct iov_iter i;
        ssize_t bw;
 
-       iov_iter_kvec(&from, ITER_KVEC | WRITE, &kvec, 1, len);
+       iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
 
        file_start_write(file);
-       bw = vfs_iter_write(file, &from, &pos);
+       bw = vfs_iter_write(file, &i, ppos);
        file_end_write(file);
-       if (likely(bw == len))
+
+       if (likely(bw ==  bvec->bv_len))
                return 0;
-       printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
-                       (unsigned long long)pos, len);
+
+       printk_ratelimited(KERN_ERR
+               "loop: Write error at byte offset %llu, length %i.\n",
+               (unsigned long long)*ppos, bvec->bv_len);
        if (bw >= 0)
                bw = -EIO;
        return bw;
 }
 
-/**
- * do_lo_send_direct_write - helper for writing data to a loop device
- *
- * This is the fast, non-transforming version that does not need double
- * buffering.
- */
-static int do_lo_send_direct_write(struct loop_device *lo,
-               struct bio_vec *bvec, loff_t pos, struct page *page)
+static int lo_write_simple(struct loop_device *lo, struct request *rq,
+               loff_t pos)
 {
-       ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
-                       kmap(bvec->bv_page) + bvec->bv_offset,
-                       bvec->bv_len, pos);
-       kunmap(bvec->bv_page);
-       cond_resched();
-       return bw;
+       struct bio_vec bvec;
+       struct req_iterator iter;
+       int ret = 0;
+
+       rq_for_each_segment(bvec, rq, iter) {
+               ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
+               if (ret < 0)
+                       break;
+               cond_resched();
+       }
+
+       return ret;
 }
 
-/**
- * do_lo_send_write - helper for writing data to a loop device
- *
+/*
  * This is the slow, transforming version that needs to double buffer the
  * data as it cannot do the transformations in place without having direct
  * access to the destination pages of the backing file.
  */
-static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
-               loff_t pos, struct page *page)
+static int lo_write_transfer(struct loop_device *lo, struct request *rq,
+               loff_t pos)
 {
-       int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
-                       bvec->bv_offset, bvec->bv_len, pos >> 9);
-       if (likely(!ret))
-               return __do_lo_send_write(lo->lo_backing_file,
-                               page_address(page), bvec->bv_len,
-                               pos);
-       printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
-                       "length %i.\n", (unsigned long long)pos, bvec->bv_len);
-       if (ret > 0)
-               ret = -EIO;
-       return ret;
-}
-
-static int lo_send(struct loop_device *lo, struct request *rq, loff_t pos)
-{
-       int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
-                       struct page *page);
-       struct bio_vec bvec;
+       struct bio_vec bvec, b;
        struct req_iterator iter;
-       struct page *page = NULL;
+       struct page *page;
        int ret = 0;
 
-       if (lo->transfer != transfer_none) {
-               page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
-               if (unlikely(!page))
-                       goto fail;
-               kmap(page);
-               do_lo_send = do_lo_send_write;
-       } else {
-               do_lo_send = do_lo_send_direct_write;
-       }
+       page = alloc_page(GFP_NOIO);
+       if (unlikely(!page))
+               return -ENOMEM;
 
        rq_for_each_segment(bvec, rq, iter) {
-               ret = do_lo_send(lo, &bvec, pos, page);
+               ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
+                       bvec.bv_offset, bvec.bv_len, pos >> 9);
+               if (unlikely(ret))
+                       break;
+
+               b.bv_page = page;
+               b.bv_offset = 0;
+               b.bv_len = bvec.bv_len;
+               ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
                if (ret < 0)
                        break;
-               pos += bvec.bv_len;
        }
-       if (page) {
-               kunmap(page);
-               __free_page(page);
-       }
-out:
+
+       __free_page(page);
        return ret;
-fail:
-       printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
-       ret = -ENOMEM;
-       goto out;
 }
 
-struct lo_read_data {
-       struct loop_device *lo;
-       struct page *page;
-       unsigned offset;
-       int bsize;
-};
+static int lo_read_simple(struct loop_device *lo, struct request *rq,
+               loff_t pos)
+{
+       struct bio_vec bvec;
+       struct req_iterator iter;
+       struct iov_iter i;
+       ssize_t len;
 
-static int
-lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
-               struct splice_desc *sd)
-{
-       struct lo_read_data *p = sd->u.data;
-       struct loop_device *lo = p->lo;
-       struct page *page = buf->page;
-       sector_t IV;
-       int size;
-
-       IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
-                                                       (buf->offset >> 9);
-       size = sd->len;
-       if (size > p->bsize)
-               size = p->bsize;
-
-       if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
-               printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
-                      page->index);
-               size = -EINVAL;
-       }
+       rq_for_each_segment(bvec, rq, iter) {
+               iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
+               len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+               if (len < 0)
+                       return len;
 
-       flush_dcache_page(p->page);
+               flush_dcache_page(bvec.bv_page);
 
-       if (size > 0)
-               p->offset += size;
+               if (len != bvec.bv_len) {
+                       struct bio *bio;
 
-       return size;
-}
+                       __rq_for_each_bio(bio, rq)
+                               zero_fill_bio(bio);
+                       break;
+               }
+               cond_resched();
+       }
 
-static int
-lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
-{
-       return __splice_from_pipe(pipe, sd, lo_splice_actor);
+       return 0;
 }
 
-static ssize_t
-do_lo_receive(struct loop_device *lo,
-             struct bio_vec *bvec, int bsize, loff_t pos)
+static int lo_read_transfer(struct loop_device *lo, struct request *rq,
+               loff_t pos)
 {
-       struct lo_read_data cookie;
-       struct splice_desc sd;
-       struct file *file;
-       ssize_t retval;
+       struct bio_vec bvec, b;
+       struct req_iterator iter;
+       struct iov_iter i;
+       struct page *page;
+       ssize_t len;
+       int ret = 0;
 
-       cookie.lo = lo;
-       cookie.page = bvec->bv_page;
-       cookie.offset = bvec->bv_offset;
-       cookie.bsize = bsize;
+       page = alloc_page(GFP_NOIO);
+       if (unlikely(!page))
+               return -ENOMEM;
 
-       sd.len = 0;
-       sd.total_len = bvec->bv_len;
-       sd.flags = 0;
-       sd.pos = pos;
-       sd.u.data = &cookie;
+       rq_for_each_segment(bvec, rq, iter) {
+               loff_t offset = pos;
 
-       file = lo->lo_backing_file;
-       retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
+               b.bv_page = page;
+               b.bv_offset = 0;
+               b.bv_len = bvec.bv_len;
 
-       return retval;
-}
+               iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
+               len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+               if (len < 0) {
+                       ret = len;
+                       goto out_free_page;
+               }
 
-static int
-lo_receive(struct loop_device *lo, struct request *rq, int bsize, loff_t pos)
-{
-       struct bio_vec bvec;
-       struct req_iterator iter;
-       ssize_t s;
+               ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
+                       bvec.bv_offset, len, offset >> 9);
+               if (ret)
+                       goto out_free_page;
 
-       rq_for_each_segment(bvec, rq, iter) {
-               s = do_lo_receive(lo, &bvec, bsize, pos);
-               if (s < 0)
-                       return s;
+               flush_dcache_page(bvec.bv_page);
 
-               if (s != bvec.bv_len) {
+               if (len != bvec.bv_len) {
                        struct bio *bio;
 
                        __rq_for_each_bio(bio, rq)
                                zero_fill_bio(bio);
                        break;
                }
-               pos += bvec.bv_len;
        }
-       return 0;
+
+       ret = 0;
+out_free_page:
+       __free_page(page);
+       return ret;
 }
 
 static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
@@ -464,10 +403,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
                        ret = lo_req_flush(lo, rq);
                else if (rq->cmd_flags & REQ_DISCARD)
                        ret = lo_discard(lo, rq, pos);
+               else if (lo->transfer)
+                       ret = lo_write_transfer(lo, rq, pos);
                else
-                       ret = lo_send(lo, rq, pos);
-       } else
-               ret = lo_receive(lo, rq, lo->lo_blocksize, pos);
+                       ret = lo_write_simple(lo, rq, pos);
+
+       } else {
+               if (lo->transfer)
+                       ret = lo_read_transfer(lo, rq, pos);
+               else
+                       ret = lo_read_simple(lo, rq, pos);
+       }
 
        return ret;
 }
@@ -788,7 +734,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
-       lo->transfer = transfer_none;
+       lo->transfer = NULL;
        lo->ioctl = NULL;
        lo->lo_sizelimit = 0;
        lo->old_gfp_mask = mapping_gfp_mask(mapping);
@@ -1007,7 +953,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
                memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
                       info->lo_encrypt_key_size);
                lo->lo_key_owner = uid;
-       }       
+       }
 
        return 0;
 }
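
The heart of the loop conversion above is that per-segment I/O now goes
through a bvec-backed iov_iter and the vfs_iter_read()/vfs_iter_write()
helpers instead of the old kmap()/splice machinery.  Stripped of error
handling, the single-segment pattern reduces to roughly this (a sketch;
read_page_at() is a hypothetical helper, not part of the patch):

static ssize_t read_page_at(struct file *file, struct page *page,
                            unsigned int len, loff_t *pos)
{
        struct bio_vec bvec = {
                .bv_page        = page,
                .bv_offset      = 0,
                .bv_len         = len,
        };
        struct iov_iter iter;

        /* Wrap the single bio_vec and let the VFS do the actual I/O. */
        iov_iter_bvec(&iter, ITER_BVEC, &bvec, 1, len);
        return vfs_iter_read(file, &iter, pos);
}
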
index 91442fab57255df1f4ebd41fa0bfaaf83b24dca2..c6c824356464c72202e5b8b0820ea3e90c708e5f 100644 (file)
@@ -359,8 +359,8 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
  * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
 #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
                      ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
-                              struct iov_iter *iter, loff_t file_offset)
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t file_offset)
 {
        struct lu_env *env;
        struct cl_io *io;
@@ -399,7 +399,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
         *    size changing by concurrent truncates and writes.
         * 1. Need inode mutex to operate transient pages.
         */
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                mutex_lock(&inode->i_mutex);
 
        LASSERT(obj->cob_transient_pages == 0);
@@ -408,7 +408,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                size_t offs;
 
                count = min_t(size_t, iov_iter_count(iter), size);
-               if (rw == READ) {
+               if (iov_iter_rw(iter) == READ) {
                        if (file_offset >= i_size_read(inode))
                                break;
                        if (file_offset + count > i_size_read(inode))
@@ -418,11 +418,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
                result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
                if (likely(result > 0)) {
                        int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);
-                       result = ll_direct_IO_26_seg(env, io, rw, inode,
-                                                    file->f_mapping,
-                                                    result, file_offset,
-                                                    pages, n);
-                       ll_free_user_pages(pages, n, rw==READ);
+                       result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
+                                                    inode, file->f_mapping,
+                                                    result, file_offset, pages,
+                                                    n);
+                       ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
                }
                if (unlikely(result <= 0)) {
                        /* If we can't allocate a large enough buffer
@@ -449,11 +449,11 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
        }
 out:
        LASSERT(obj->cob_transient_pages == 0);
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                mutex_unlock(&inode->i_mutex);
 
        if (tot_bytes > 0) {
-               if (rw == WRITE) {
+               if (iov_iter_rw(iter) == WRITE) {
                        struct lov_stripe_md *lsm;
 
                        lsm = ccc_inode_lsm_get(inode);
index 2e38f9a5b472874ea7e3991b03e7ad33fdc88aa7..be35d05a4d0efc00c5955cc047ac20bea293149a 100644 (file)
@@ -230,7 +230,6 @@ static int v9fs_launder_page(struct page *page)
 
 /**
  * v9fs_direct_IO - 9P address space operation for direct I/O
- * @rw: direction (read or write)
  * @iocb: target I/O control block
  * @iov: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
@@ -248,12 +247,12 @@ static int v9fs_launder_page(struct page *page)
  *
  */
 static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        ssize_t n;
        int err = 0;
-       if (rw & WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                n = p9_client_write(file->private_data, pos, iter, &err);
                if (n) {
                        struct inode *inode = file_inode(file);
index d7fcb775311e5a1697a2d8866a5a9f2bb25eb99a..2a9dd37dc426d6cf38eeac64727217164c903ae6 100644 (file)
@@ -404,21 +404,16 @@ static ssize_t
 v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
-       ssize_t retval = 0;
-       loff_t origin = iocb->ki_pos;
-       size_t count = iov_iter_count(from);
+       ssize_t retval;
+       loff_t origin;
        int err = 0;
 
-       retval = generic_write_checks(file, &origin, &count, 0);
-       if (retval)
+       retval = generic_write_checks(iocb, from);
+       if (retval <= 0)
                return retval;
 
-       iov_iter_truncate(from, count);
-
-       if (!count)
-               return 0;
-
-       retval = p9_client_write(file->private_data, origin, from, &err);
+       origin = iocb->ki_pos;
+       retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
        if (retval > 0) {
                struct inode *inode = file_inode(file);
                loff_t i_size;
@@ -428,12 +423,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                if (inode->i_mapping && inode->i_mapping->nrpages)
                        invalidate_inode_pages2_range(inode->i_mapping,
                                                      pg_start, pg_end);
-               origin += retval;
+               iocb->ki_pos += retval;
                i_size = i_size_read(inode);
-               iocb->ki_pos = origin;
-               if (origin > i_size) {
-                       inode_add_bytes(inode, origin - i_size);
-                       i_size_write(inode, origin);
+               if (iocb->ki_pos > i_size) {
+                       inode_add_bytes(inode, iocb->ki_pos - i_size);
+                       i_size_write(inode, iocb->ki_pos);
                }
                return retval;
        }
index 7c1a3d4c19c23cd0fd2e9452d813b1014f0233fd..dcf27951781cfadc90711c0aa41f83401b2eb03b 100644 (file)
@@ -389,8 +389,7 @@ static void affs_write_failed(struct address_space *mapping, loff_t to)
 }
 
 static ssize_t
-affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-              loff_t offset)
+affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -398,15 +397,15 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                loff_t size = offset + count;
 
                if (AFFS_I(inode)->mmu_private < size)
                        return 0;
        }
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
-       if (ret < 0 && (rw & WRITE))
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
+       if (ret < 0 && iov_iter_rw(iter) == WRITE)
                affs_write_failed(mapping, offset + count);
        return ret;
 }
index fa8b16f47f1a9ee4bdfac0d5ce9192dac74c79e6..480440f4701fb8c546d9e39c640295cad4224b46 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1517,7 +1517,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        }
        req->common.ki_pos = iocb->aio_offset;
        req->common.ki_complete = aio_complete;
-       req->common.ki_flags = 0;
+       req->common.ki_flags = iocb_flags(req->common.ki_filp);
 
        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
                /*
index b5e87896f517d17aea743eec62cdfeada14db203..897ee0503932fbbf7adc88b0fd27591576696160 100644 (file)
@@ -146,15 +146,13 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 }
 
 static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-                       loff_t offset)
+blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
-       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
-                                   offset, blkdev_get_block,
-                                   NULL, NULL, 0);
+       return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
+                                   blkdev_get_block, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1597,9 +1595,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
+       struct inode *bd_inode = file->f_mapping->host;
+       loff_t size = i_size_read(bd_inode);
        struct blk_plug plug;
        ssize_t ret;
 
+       if (bdev_read_only(I_BDEV(bd_inode)))
+               return -EPERM;
+
+       if (!iov_iter_count(from))
+               return 0;
+
+       if (iocb->ki_pos >= size)
+               return -ENOSPC;
+
+       iov_iter_truncate(from, size - iocb->ki_pos);
+
        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0) {
index cdc801c8510570a650a17212427b92d35823fab5..faa7d390841b9a245a95ffb4944d515c3c486cbf 100644 (file)
@@ -1739,27 +1739,19 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        u64 start_pos;
        u64 end_pos;
        ssize_t num_written = 0;
-       ssize_t err = 0;
-       size_t count = iov_iter_count(from);
        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
-       loff_t pos = iocb->ki_pos;
+       ssize_t err;
+       loff_t pos;
+       size_t count;
 
        mutex_lock(&inode->i_mutex);
-
-       current->backing_dev_info = inode_to_bdi(inode);
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-       if (err) {
+       err = generic_write_checks(iocb, from);
+       if (err <= 0) {
                mutex_unlock(&inode->i_mutex);
-               goto out;
-       }
-
-       if (count == 0) {
-               mutex_unlock(&inode->i_mutex);
-               goto out;
+               return err;
        }
 
-       iov_iter_truncate(from, count);
-
+       current->backing_dev_info = inode_to_bdi(inode);
        err = file_remove_suid(file);
        if (err) {
                mutex_unlock(&inode->i_mutex);
@@ -1786,6 +1778,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
         */
        update_time_for_write(inode);
 
+       pos = iocb->ki_pos;
+       count = iov_iter_count(from);
        start_pos = round_down(pos, root->sectorsize);
        if (start_pos > i_size_read(inode)) {
                /* Expand hole size to cover write data, preventing empty gap */
@@ -1800,7 +1794,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        if (sync)
                atomic_inc(&BTRFS_I(inode)->sync_writers);
 
-       if (file->f_flags & O_DIRECT) {
+       if (iocb->ki_flags & IOCB_DIRECT) {
                num_written = __btrfs_direct_write(iocb, from, pos);
        } else {
                num_written = __btrfs_buffered_write(file, from, pos);
index 686331f22b15ce0fcc8233c2529a50c2eb6190c7..43192e10cc4331f9f533c61f10f21c602bdee806 100644 (file)
@@ -8081,7 +8081,7 @@ free_ordered:
        bio_endio(dio_bio, ret);
 }
 
-static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
+static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
                        const struct iov_iter *iter, loff_t offset)
 {
        int seg;
@@ -8096,7 +8096,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                goto out;
 
        /* If this is a write we don't need to check anymore */
-       if (rw & WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                return 0;
        /*
         * Check to make sure we don't have duplicate iov_base's in this
@@ -8114,8 +8114,8 @@ out:
        return retval;
 }
 
-static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-                       struct iov_iter *iter, loff_t offset)
+static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -8126,7 +8126,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
        bool relock = false;
        ssize_t ret;
 
-       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
+       if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
                return 0;
 
        atomic_inc(&inode->i_dio_count);
@@ -8144,7 +8144,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                filemap_fdatawrite_range(inode->i_mapping, offset,
                                         offset + count - 1);
 
-       if (rw & WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                /*
                 * If the write DIO is beyond the EOF, we need update
                 * the isize, but it is protected by i_mutex. So we can
@@ -8174,11 +8174,11 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                wakeup = false;
        }
 
-       ret = __blockdev_direct_IO(rw, iocb, inode,
-                       BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-                       iter, offset, btrfs_get_blocks_direct, NULL,
-                       btrfs_submit_direct, flags);
-       if (rw & WRITE) {
+       ret = __blockdev_direct_IO(iocb, inode,
+                                  BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+                                  iter, offset, btrfs_get_blocks_direct, NULL,
+                                  btrfs_submit_direct, flags);
+       if (iov_iter_rw(iter) == WRITE) {
                current->journal_info = NULL;
                if (ret < 0 && ret != -EIOCBQUEUED)
                        btrfs_delalloc_release_space(inode, count);
index fd5599d323620a2c5617ea5355e2e1320d6a0954..155ab9c0246b202aed75e3db02313369fab52976 100644 (file)
@@ -1198,8 +1198,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
  * intercept O_DIRECT reads and writes early, this function should
  * never get called.
  */
-static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter,
+static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t pos)
 {
        WARN_ON(1);
index 56237ea5fc227580580e8f27a308e97a7fb1c07b..b9b8eb225f66ead124f4a1484cfc5b76dbb8d2fb 100644 (file)
@@ -457,7 +457,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
        if (ret < 0)
                return ret;
 
-       if (file->f_flags & O_DIRECT) {
+       if (iocb->ki_flags & IOCB_DIRECT) {
                while (iov_iter_count(i)) {
                        size_t start;
                        ssize_t n;
@@ -828,7 +828,7 @@ again:
                return ret;
 
        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
-           (iocb->ki_filp->f_flags & O_DIRECT) ||
+           (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {
 
                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
@@ -941,9 +941,9 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
-       ssize_t count = iov_iter_count(from), written = 0;
+       ssize_t count, written = 0;
        int err, want, got;
-       loff_t pos = iocb->ki_pos;
+       loff_t pos;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
@@ -953,14 +953,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
 
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-       if (err)
-               goto out;
-
-       if (count == 0)
+       err = generic_write_checks(iocb, from);
+       if (err <= 0)
                goto out;
-       iov_iter_truncate(from, count);
 
+       pos = iocb->ki_pos;
+       count = iov_iter_count(from);
        err = file_remove_suid(file);
        if (err)
                goto out;
@@ -997,12 +995,12 @@ retry_snap:
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
 
        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
-           (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
+           (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
                struct iov_iter data;
                mutex_unlock(&inode->i_mutex);
                /* we might need to revert back to that point */
                data = *from;
-               if (file->f_flags & O_DIRECT)
+               if (iocb->ki_flags & IOCB_DIRECT)
                        written = ceph_sync_direct_write(iocb, &data, pos);
                else
                        written = ceph_sync_write(iocb, &data, pos);
index ca30c391a894a0e9df8eac6ed1ede194c89f1885..ca2bc5406306e57fb0d9620ffeec3488ed7d4414 100644 (file)
@@ -2560,10 +2560,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
        return rc;
 }
 
-static ssize_t
-cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
+ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
 {
-       size_t len;
+       struct file *file = iocb->ki_filp;
        ssize_t total_written = 0;
        struct cifsFileInfo *open_file;
        struct cifs_tcon *tcon;
@@ -2573,15 +2572,15 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
        struct iov_iter saved_from;
        int rc;
 
-       len = iov_iter_count(from);
-       rc = generic_write_checks(file, poffset, &len, 0);
-       if (rc)
-               return rc;
-
-       if (!len)
-               return 0;
+       /*
+        * BB - optimize the way when signing is disabled. We can drop this
+        * extra memory-to-memory copying and use iovec buffers for constructing
+        * write request.
+        */
 
-       iov_iter_truncate(from, len);
+       rc = generic_write_checks(iocb, from);
+       if (rc <= 0)
+               return rc;
 
        INIT_LIST_HEAD(&wdata_list);
        cifs_sb = CIFS_FILE_SB(file);
@@ -2593,8 +2592,8 @@ cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
 
        memcpy(&saved_from, from, sizeof(struct iov_iter));
 
-       rc = cifs_write_from_iter(*poffset, len, from, open_file, cifs_sb,
-                                 &wdata_list);
+       rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
+                                 open_file, cifs_sb, &wdata_list);
 
        /*
         * If at least one write was successfully sent, then discard any rc
@@ -2633,7 +2632,7 @@ restart_loop:
                                memcpy(&tmp_from, &saved_from,
                                       sizeof(struct iov_iter));
                                iov_iter_advance(&tmp_from,
-                                                wdata->offset - *poffset);
+                                                wdata->offset - iocb->ki_pos);
 
                                rc = cifs_write_from_iter(wdata->offset,
                                                wdata->bytes, &tmp_from,
@@ -2650,34 +2649,13 @@ restart_loop:
                kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        }
 
-       if (total_written > 0)
-               *poffset += total_written;
+       if (unlikely(!total_written))
+               return rc;
 
+       iocb->ki_pos += total_written;
+       set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
        cifs_stats_bytes_written(tcon, total_written);
-       return total_written ? total_written : (ssize_t)rc;
-}
-
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
-{
-       ssize_t written;
-       struct inode *inode;
-       loff_t pos = iocb->ki_pos;
-
-       inode = file_inode(iocb->ki_filp);
-
-       /*
-        * BB - optimize the way when signing is disabled. We can drop this
-        * extra memory-to-memory copying and use iovec buffers for constructing
-        * write request.
-        */
-
-       written = cifs_iovec_write(iocb->ki_filp, from, &pos);
-       if (written > 0) {
-               set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
-               iocb->ki_pos = pos;
-       }
-
-       return written;
+       return total_written;
 }
 
 static ssize_t
@@ -2688,8 +2666,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file->f_mapping->host;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
-       ssize_t rc = -EACCES;
-       loff_t lock_pos = iocb->ki_pos;
+       ssize_t rc;
 
        /*
         * We need to hold the sem to be sure nobody modifies lock list
@@ -2697,23 +2674,24 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
         */
        down_read(&cinode->lock_sem);
        mutex_lock(&inode->i_mutex);
-       if (file->f_flags & O_APPEND)
-               lock_pos = i_size_read(inode);
-       if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from),
+
+       rc = generic_write_checks(iocb, from);
+       if (rc <= 0)
+               goto out;
+
+       if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
                                     server->vals->exclusive_lock_type, NULL,
-                                    CIFS_WRITE_OP)) {
+                                    CIFS_WRITE_OP))
                rc = __generic_file_write_iter(iocb, from);
-               mutex_unlock(&inode->i_mutex);
-
-               if (rc > 0) {
-                       ssize_t err;
+       else
+               rc = -EACCES;
+out:
+       mutex_unlock(&inode->i_mutex);
 
-                       err = generic_write_sync(file, iocb->ki_pos - rc, rc);
-                       if (err < 0)
-                               rc = err;
-               }
-       } else {
-               mutex_unlock(&inode->i_mutex);
+       if (rc > 0) {
+               ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+               if (err < 0)
+                       rc = err;
        }
        up_read(&cinode->lock_sem);
        return rc;
@@ -3877,8 +3855,7 @@ void cifs_oplock_break(struct work_struct *work)
  * Direct IO is not yet supported in the cached mode. 
  */
 static ssize_t
-cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
-               loff_t pos)
+cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
         /*
          * FIXME
index cf0db005d2f58ab2ed2b42217777f643e6d0d235..acb3d63bc9dc763cf8e3518cfe66a233ee9ee63f 100644 (file)
@@ -1598,7 +1598,7 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
                        if (offset >= 0)
                                break;
                default:
-                       mutex_unlock(&file_inode(file)->i_mutex);
+                       mutex_unlock(&dentry->d_inode->i_mutex);
                        return -EINVAL;
        }
        if (offset != file->f_pos) {
index d0bd1f4f81b3528a1482d41a7b1f4b99499f3a23..0bb0aecb556cd15be7c902b3b315345a1017588f 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,9 +98,9 @@ static bool buffer_size_valid(struct buffer_head *bh)
        return bh->b_state != 0;
 }
 
-static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
-                       loff_t start, loff_t end, get_block_t get_block,
-                       struct buffer_head *bh)
+static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
+                     loff_t start, loff_t end, get_block_t get_block,
+                     struct buffer_head *bh)
 {
        ssize_t retval = 0;
        loff_t pos = start;
@@ -109,7 +109,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
        void *addr;
        bool hole = false;
 
-       if (rw != WRITE)
+       if (iov_iter_rw(iter) != WRITE)
                end = min(end, i_size_read(inode));
 
        while (pos < end) {
@@ -124,7 +124,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
                                bh->b_size = PAGE_ALIGN(end - pos);
                                bh->b_state = 0;
                                retval = get_block(inode, block, bh,
-                                                               rw == WRITE);
+                                                  iov_iter_rw(iter) == WRITE);
                                if (retval)
                                        break;
                                if (!buffer_size_valid(bh))
@@ -137,7 +137,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
                                bh->b_size -= done;
                        }
 
-                       hole = (rw != WRITE) && !buffer_written(bh);
+                       hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
                        if (hole) {
                                addr = NULL;
                                size = bh->b_size - first;
@@ -154,7 +154,7 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
                        max = min(pos + size, end);
                }
 
-               if (rw == WRITE)
+               if (iov_iter_rw(iter) == WRITE)
                        len = copy_from_iter(addr, max - pos, iter);
                else if (!hole)
                        len = copy_to_iter(addr, max - pos, iter);
@@ -173,7 +173,6 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
 
 /**
  * dax_do_io - Perform I/O to a DAX file
- * @rw: READ to read or WRITE to write
  * @iocb: The control block for this I/O
  * @inode: The file which the I/O is directed at
  * @iter: The addresses to do I/O from or to
@@ -189,9 +188,9 @@ static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter,
  * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
  * is in progress.
  */
-ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
-                       struct iov_iter *iter, loff_t pos,
-                       get_block_t get_block, dio_iodone_t end_io, int flags)
+ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
+                 struct iov_iter *iter, loff_t pos, get_block_t get_block,
+                 dio_iodone_t end_io, int flags)
 {
        struct buffer_head bh;
        ssize_t retval = -EINVAL;
@@ -199,7 +198,7 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
 
        memset(&bh, 0, sizeof(bh));
 
-       if ((flags & DIO_LOCKING) && (rw == READ)) {
+       if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
                struct address_space *mapping = inode->i_mapping;
                mutex_lock(&inode->i_mutex);
                retval = filemap_write_and_wait_range(mapping, pos, end - 1);
@@ -212,9 +211,9 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode,
        /* Protects against truncate */
        atomic_inc(&inode->i_dio_count);
 
-       retval = dax_io(rw, inode, iter, pos, end, get_block, &bh);
+       retval = dax_io(inode, iter, pos, end, get_block, &bh);
 
-       if ((flags & DIO_LOCKING) && (rw == READ))
+       if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                mutex_unlock(&inode->i_mutex);
 
        if ((retval > 0) && end_io)
index d99736a63e3cf6d5da6850e4eee02ecd7ae672e4..656ce522a218f29850e2415b0a038c8208a5dd52 100644 (file)
@@ -269,6 +269,41 @@ static inline int dname_external(const struct dentry *dentry)
        return dentry->d_name.name != dentry->d_iname;
 }
 
+/*
+ * Make sure other CPUs see the inode attached before the type is set.
+ */
+static inline void __d_set_inode_and_type(struct dentry *dentry,
+                                         struct inode *inode,
+                                         unsigned type_flags)
+{
+       unsigned flags;
+
+       dentry->d_inode = inode;
+       smp_wmb();
+       flags = READ_ONCE(dentry->d_flags);
+       flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+       flags |= type_flags;
+       WRITE_ONCE(dentry->d_flags, flags);
+}
+
+/*
+ * Ideally, we want to make sure that other CPUs see the flags cleared before
+ * the inode is detached, but this is really a violation of RCU principles
+ * since the ordering suggests we should always set inode before flags.
+ *
+ * We should instead replace or discard the entire dentry - but that sucks
+ * performancewise on mass deletion/rename.
+ */
+static inline void __d_clear_type_and_inode(struct dentry *dentry)
+{
+       unsigned flags = READ_ONCE(dentry->d_flags);
+
+       flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+       WRITE_ONCE(dentry->d_flags, flags);
+       smp_wmb();
+       dentry->d_inode = NULL;
+}
+
 static void dentry_free(struct dentry *dentry)
 {
        WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
@@ -311,7 +346,7 @@ static void dentry_iput(struct dentry * dentry)
 {
        struct inode *inode = dentry->d_inode;
        if (inode) {
-               dentry->d_inode = NULL;
+               __d_clear_type_and_inode(dentry);
                hlist_del_init(&dentry->d_u.d_alias);
                spin_unlock(&dentry->d_lock);
                spin_unlock(&inode->i_lock);
@@ -335,8 +370,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
        __releases(dentry->d_inode->i_lock)
 {
        struct inode *inode = dentry->d_inode;
-       __d_clear_type(dentry);
-       dentry->d_inode = NULL;
+       __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
        dentry_rcuwalk_barrier(dentry);
        spin_unlock(&dentry->d_lock);
@@ -1715,11 +1749,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        unsigned add_flags = d_flags_for_inode(inode);
 
        spin_lock(&dentry->d_lock);
-       dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
-       dentry->d_flags |= add_flags;
        if (inode)
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
-       dentry->d_inode = inode;
+       __d_set_inode_and_type(dentry, inode, add_flags);
        dentry_rcuwalk_barrier(dentry);
        spin_unlock(&dentry->d_lock);
        fsnotify_d_instantiate(dentry, inode);
@@ -1937,8 +1969,7 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
                add_flags |= DCACHE_DISCONNECTED;
 
        spin_lock(&tmp->d_lock);
-       tmp->d_inode = inode;
-       tmp->d_flags |= add_flags;
+       __d_set_inode_and_type(tmp, inode, add_flags);
        hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
        hlist_bl_lock(&tmp->d_sb->s_anon);
        hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
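
The new helpers above order the d_inode store before the d_flags update
(and the reverse on teardown), which is what lets later commits in this
pull replace direct ->d_inode checks with d_flags-based predicates such as
d_is_positive() and d_is_dir().  A hedged caller-side illustration
(can_rmdir() is hypothetical, not from the patch):

static bool can_rmdir(const struct dentry *dentry)
{
        /*
         * d_is_positive()/d_is_dir() look only at d_flags; the write
         * ordering in __d_set_inode_and_type() is what makes such
         * checks preferable to peeking at ->d_inode directly.
         */
        return d_is_positive(dentry) && d_is_dir(dentry);
}
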
index 61e72d44cf94142c1946f9dbc74904b8f31ec98e..c9ee0dfe90b5cfaa0f90c486812439b0f7a84b0e 100644 (file)
@@ -524,7 +524,7 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
 
        if (debugfs_positive(dentry)) {
                dget(dentry);
-               if (S_ISDIR(dentry->d_inode->i_mode))
+               if (d_is_dir(dentry))
                        ret = simple_rmdir(parent->d_inode, dentry);
                else
                        simple_unlink(parent->d_inode, dentry);
index 6fb00e3f1059791d21b4ffc80671f3d051ecbc8e..c3b560b24a463c50569a2a37c5d04f8d5af92c4d 100644 (file)
@@ -1093,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio)
  * for the whole file.
  */
 static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, struct iov_iter *iter, loff_t offset, 
-       get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+                     struct block_device *bdev, struct iov_iter *iter,
+                     loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+                     dio_submit_t submit_io, int flags)
 {
        unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
        unsigned blkbits = i_blkbits;
@@ -1110,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        struct blk_plug plug;
        unsigned long align = offset | iov_iter_alignment(iter);
 
-       if (rw & WRITE)
-               rw = WRITE_ODIRECT;
-
        /*
         * Avoid references to bdev if not absolutely needed to give
         * the early prefetch in the caller enough time.
@@ -1127,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        }
 
        /* watch out for a 0 len io from a tricksy fs */
-       if (rw == READ && !iov_iter_count(iter))
+       if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
                return 0;
 
        dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1143,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 
        dio->flags = flags;
        if (dio->flags & DIO_LOCKING) {
-               if (rw == READ) {
+               if (iov_iter_rw(iter) == READ) {
                        struct address_space *mapping =
                                        iocb->ki_filp->f_mapping;
 
@@ -1169,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        if (is_sync_kiocb(iocb))
                dio->is_async = false;
        else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
-            (rw & WRITE) && end > i_size_read(inode))
+                iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
                dio->is_async = false;
        else
                dio->is_async = true;
 
        dio->inode = inode;
-       dio->rw = rw;
+       dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
 
        /*
         * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
         * so that we can call ->fsync.
         */
-       if (dio->is_async && (rw & WRITE) &&
+       if (dio->is_async && iov_iter_rw(iter) == WRITE &&
            ((iocb->ki_filp->f_flags & O_DSYNC) ||
             IS_SYNC(iocb->ki_filp->f_mapping->host))) {
                retval = dio_set_defer_completion(dio);
@@ -1274,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         * we can let i_mutex go now that its achieved its purpose
         * of protecting us from looking up uninitialized blocks.
         */
-       if (rw == READ && (dio->flags & DIO_LOCKING))
+       if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
                mutex_unlock(&dio->inode->i_mutex);
 
        /*
@@ -1286,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         */
        BUG_ON(retval == -EIOCBQUEUED);
        if (dio->is_async && retval == 0 && dio->result &&
-           (rw == READ || dio->result == count))
+           (iov_iter_rw(iter) == READ || dio->result == count))
                retval = -EIOCBQUEUED;
        else
                dio_await_completion(dio);
@@ -1300,11 +1297,11 @@ out:
        return retval;
 }
 
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-       get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+                            struct block_device *bdev, struct iov_iter *iter,
+                            loff_t offset, get_block_t get_block,
+                            dio_iodone_t end_io, dio_submit_t submit_io,
+                            int flags)
 {
        /*
         * The block device state is needed in the end to finally
@@ -1318,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        prefetch(bdev->bd_queue);
        prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
-                                    get_block, end_io, submit_io, flags);
+       return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+                                    end_io, submit_io, flags);
 }
 
 EXPORT_SYMBOL(__blockdev_direct_IO);
index a198e94813fec42378c75c568b013f360c5ea1e2..35073aaec6e08895f06ce3f7db86820d7dc982ec 100644 (file)
@@ -963,8 +963,8 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset,
 
 
  /* TODO: Should be easy enough to do proprly */
-static ssize_t exofs_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t offset)
 {
        return 0;
 }
index b29eb6747116228cc35fa849a830187206e248b7..5d9213963faee582c2520cede532f045e629c5d3 100644 (file)
@@ -851,8 +851,7 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-                       loff_t offset)
+ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -861,12 +860,12 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        ssize_t ret;
 
        if (IS_DAX(inode))
-               ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block,
-                               NULL, DIO_LOCKING);
+               ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block, NULL,
+                               DIO_LOCKING);
        else
-               ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+               ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                         ext2_get_block);
-       if (ret < 0 && (rw & WRITE))
+       if (ret < 0 && iov_iter_rw(iter) == WRITE)
                ext2_write_failed(mapping, offset + count);
        return ret;
 }
index db07ffbe7c85cdabbe89d49b3b2294dd8a1d84cf..13c0868c7160ee572f30601939fcd6f421a01f11 100644 (file)
@@ -1820,8 +1820,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
-                       struct iov_iter *iter, loff_t offset)
+static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1832,9 +1832,9 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       trace_ext3_direct_IO_enter(inode, offset, count, rw);
+       trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                loff_t final_size = offset + count;
 
                if (final_size > inode->i_size) {
@@ -1856,12 +1856,12 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
@@ -1908,7 +1908,7 @@ retry:
                        ret = err;
        }
 out:
-       trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
+       trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
        return ret;
 }
 
index 8a3981ea35d874706e00ed17b37d05d509129950..c8eb32eefc3cd512e053b159abddae46d279e0d6 100644 (file)
@@ -2152,8 +2152,8 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
 /* indirect.c */
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                                struct ext4_map_blocks *map, int flags);
-extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                               struct iov_iter *iter, loff_t offset);
+extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t offset);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
index 7a6defcf33529279d72dc89154af5779acda63c2..e576d682b35323f324c9cdf698aac6a9f725488a 100644 (file)
@@ -95,11 +95,9 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file_inode(iocb->ki_filp);
        struct mutex *aio_mutex = NULL;
        struct blk_plug plug;
-       int o_direct = io_is_direct(file);
+       int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int overwrite = 0;
-       size_t length = iov_iter_count(from);
        ssize_t ret;
-       loff_t pos = iocb->ki_pos;
 
        /*
         * Unaligned direct AIO must be serialized; see comment above
@@ -108,16 +106,17 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (o_direct &&
            ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
-           (file->f_flags & O_APPEND ||
-            ext4_unaligned_aio(inode, from, pos))) {
+           (iocb->ki_flags & IOCB_APPEND ||
+            ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
                aio_mutex = ext4_aio_mutex(inode);
                mutex_lock(aio_mutex);
                ext4_unwritten_wait(inode);
        }
 
        mutex_lock(&inode->i_mutex);
-       if (file->f_flags & O_APPEND)
-               iocb->ki_pos = pos = i_size_read(inode);
+       ret = generic_write_checks(iocb, from);
+       if (ret <= 0)
+               goto out;
 
        /*
         * If we have encountered a bitmap-format file, the size limit
@@ -126,22 +125,19 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 
-               if ((pos > sbi->s_bitmap_maxbytes) ||
-                   (pos == sbi->s_bitmap_maxbytes && length > 0)) {
-                       mutex_unlock(&inode->i_mutex);
+               if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
                        ret = -EFBIG;
-                       goto errout;
+                       goto out;
                }
-
-               if (pos + length > sbi->s_bitmap_maxbytes)
-                       iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
+               iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
 
        iocb->private = &overwrite;
        if (o_direct) {
+               size_t length = iov_iter_count(from);
+               loff_t pos = iocb->ki_pos;
                blk_start_plug(&plug);
 
-
                /* check whether we do a DIO overwrite or not */
                if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
@@ -185,7 +181,12 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (o_direct)
                blk_finish_plug(&plug);
 
-errout:
+       if (aio_mutex)
+               mutex_unlock(aio_mutex);
+       return ret;
+
+out:
+       mutex_unlock(&inode->i_mutex);
        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;
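The shared caller pattern behind this hunk may be easier to see in isolation: generic_write_checks() now takes the iocb and the iov_iter, returns a negative error, zero when there is nothing to write, or the permitted byte count, and handles the O_APPEND repositioning and iov_iter_truncate() that callers used to open-code. A hedged sketch, with example_perform_write() standing in for a filesystem's actual write path:

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	/* Sets ki_pos for append writes and trims 'from' to the allowed size. */
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	ret = example_perform_write(iocb, from);
	if (ret > 0)
		iocb->ki_pos += ret;
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}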
index 740c7871c11770a683395989df5548d3d3357c22..3580629e42d32aadaff81effd01fddb2aaa41484 100644 (file)
@@ -642,8 +642,8 @@ out:
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                          struct iov_iter *iter, loff_t offset)
+ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                          loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -654,7 +654,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                loff_t final_size = offset + count;
 
                if (final_size > inode->i_size) {
@@ -676,7 +676,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       if (rw == READ && ext4_should_dioread_nolock(inode)) {
+       if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) {
                /*
                 * Nolock dioread optimization may be dynamically disabled
                 * via ext4_inode_block_unlocked_dio(). Check inode's state
@@ -690,23 +690,24 @@ retry:
                        goto locked;
                }
                if (IS_DAX(inode))
-                       ret = dax_do_io(rw, iocb, inode, iter, offset,
+                       ret = dax_do_io(iocb, inode, iter, offset,
                                        ext4_get_block, NULL, 0);
                else
-                       ret = __blockdev_direct_IO(rw, iocb, inode,
-                                       inode->i_sb->s_bdev, iter, offset,
-                                       ext4_get_block, NULL, NULL, 0);
+                       ret = __blockdev_direct_IO(iocb, inode,
+                                                  inode->i_sb->s_bdev, iter,
+                                                  offset, ext4_get_block, NULL,
+                                                  NULL, 0);
                inode_dio_done(inode);
        } else {
 locked:
                if (IS_DAX(inode))
-                       ret = dax_do_io(rw, iocb, inode, iter, offset,
+                       ret = dax_do_io(iocb, inode, iter, offset,
                                        ext4_get_block, NULL, DIO_LOCKING);
                else
-                       ret = blockdev_direct_IO(rw, iocb, inode, iter,
-                                       offset, ext4_get_block);
+                       ret = blockdev_direct_IO(iocb, inode, iter, offset,
+                                                ext4_get_block);
 
-               if (unlikely((rw & WRITE) && ret < 0)) {
+               if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                        loff_t isize = i_size_read(inode);
                        loff_t end = offset + count;
 
index 035b7a06f1c3b0e09f059a7905b815bf344701f1..b49cf6e5995309a6c7d813512ad07731c3885853 100644 (file)
@@ -2952,8 +2952,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
  * if the machine crashes during the write.
  *
  */
-static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter, loff_t offset)
+static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -2966,8 +2966,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        ext4_io_end_t *io_end = NULL;
 
        /* Use the old path for reads and writes beyond i_size. */
-       if (rw != WRITE || final_size > inode->i_size)
-               return ext4_ind_direct_IO(rw, iocb, iter, offset);
+       if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
+               return ext4_ind_direct_IO(iocb, iter, offset);
 
        BUG_ON(iocb->private == NULL);
 
@@ -2976,7 +2976,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
         * conversion. This also disallows race between truncate() and
         * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
         */
-       if (rw == WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                atomic_inc(&inode->i_dio_count);
 
        /* If we do a overwrite dio, i_mutex locking can be released */
@@ -3034,10 +3034,10 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                dio_flags = DIO_LOCKING;
        }
        if (IS_DAX(inode))
-               ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func,
+               ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
                                ext4_end_io_dio, dio_flags);
        else
-               ret = __blockdev_direct_IO(rw, iocb, inode,
+               ret = __blockdev_direct_IO(iocb, inode,
                                           inode->i_sb->s_bdev, iter, offset,
                                           get_block_func,
                                           ext4_end_io_dio, NULL, dio_flags);
@@ -3078,7 +3078,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retake_lock:
-       if (rw == WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                inode_dio_done(inode);
        /* take i_mutex locking again if we do an overwrite dio */
        if (overwrite) {
@@ -3089,8 +3089,8 @@ retake_lock:
        return ret;
 }
 
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter, loff_t offset)
+static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -3107,12 +3107,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        if (ext4_has_inline_data(inode))
                return 0;
 
-       trace_ext4_direct_IO_enter(inode, offset, count, rw);
+       trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
+               ret = ext4_ext_direct_IO(iocb, iter, offset);
        else
-               ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
-       trace_ext4_direct_IO_exit(inode, offset, count, rw, ret);
+               ret = ext4_ind_direct_IO(iocb, iter, offset);
+       trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
        return ret;
 }
 
index 497f8515d2056283d040b912dd638e65a4576fe7..319eda511c4ff6e4bd869aaf2e1b711ecdc6470c 100644 (file)
@@ -1118,12 +1118,12 @@ static int f2fs_write_end(struct file *file,
        return copied;
 }
 
-static int check_direct_IO(struct inode *inode, int rw,
-               struct iov_iter *iter, loff_t offset)
+static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
+                          loff_t offset)
 {
        unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
 
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                return 0;
 
        if (offset & blocksize_mask)
@@ -1135,8 +1135,8 @@ static int check_direct_IO(struct inode *inode, int rw,
        return 0;
 }
 
-static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -1151,19 +1151,19 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                        return err;
        }
 
-       if (check_direct_IO(inode, rw, iter, offset))
+       if (check_direct_IO(inode, iter, offset))
                return 0;
 
-       trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+       trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-       if (rw & WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                __allocate_data_blocks(inode, offset, count);
 
-       err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
-       if (err < 0 && (rw & WRITE))
+       err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
+       if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);
 
-       trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
+       trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
 
        return err;
 }
index 8521207de22935464f074b70448cae781ed403e1..41b729933638a128225422013417775fb0858c26 100644 (file)
@@ -245,8 +245,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
        return err;
 }
 
-static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
-                            struct iov_iter *iter,
+static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
@@ -255,7 +254,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                /*
                 * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
                 * so we need to update the ->mmu_private to block boundary.
@@ -274,8 +273,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
         * FAT need to use the DIO_LOCKING for avoiding the race
         * condition of fat_get_block() and ->truncate().
         */
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
-       if (ret < 0 && (rw & WRITE))
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block);
+       if (ret < 0 && iov_iter_rw(iter) == WRITE)
                fat_write_failed(mapping, offset + count);
 
        return ret;
index e1afdd7abf90114fffd61da6bcfcc77916afb950..5ef05b5c4cff86e9353a0594f0faadc1ab8612f5 100644 (file)
@@ -1145,13 +1145,11 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
-       size_t count = iov_iter_count(from);
        ssize_t written = 0;
        ssize_t written_buffered = 0;
        struct inode *inode = mapping->host;
        ssize_t err;
        loff_t endbyte = 0;
-       loff_t pos = iocb->ki_pos;
 
        if (get_fuse_conn(inode)->writeback_cache) {
                /* Update size (EOF optimization) and mode (SUID clearing) */
@@ -1167,14 +1165,10 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
 
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-       if (err)
-               goto out;
-
-       if (count == 0)
+       err = generic_write_checks(iocb, from);
+       if (err <= 0)
                goto out;
 
-       iov_iter_truncate(from, count);
        err = file_remove_suid(file);
        if (err)
                goto out;
@@ -1183,7 +1177,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (err)
                goto out;
 
-       if (file->f_flags & O_DIRECT) {
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               loff_t pos = iocb->ki_pos;
                written = generic_file_direct_write(iocb, from, pos);
                if (written < 0 || !iov_iter_count(from))
                        goto out;
@@ -1209,9 +1204,9 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                written += written_buffered;
                iocb->ki_pos = pos + written_buffered;
        } else {
-               written = fuse_perform_write(file, mapping, from, pos);
+               written = fuse_perform_write(file, mapping, from, iocb->ki_pos);
                if (written >= 0)
-                       iocb->ki_pos = pos + written;
+                       iocb->ki_pos += written;
        }
 out:
        current->backing_dev_info = NULL;
@@ -1412,7 +1407,6 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct fuse_io_priv io = { .async = 0, .file = file };
-       size_t count = iov_iter_count(from);
        ssize_t res;
 
        if (is_bad_inode(inode))
@@ -1420,11 +1414,9 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
-       res = generic_write_checks(file, &iocb->ki_pos, &count, 0);
-       if (!res) {
-               iov_iter_truncate(from, count);
+       res = generic_write_checks(iocb, from);
+       if (res > 0)
                res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
-       }
        fuse_invalidate_attr(inode);
        if (res > 0)
                fuse_write_update_size(inode, iocb->ki_pos);
@@ -2782,8 +2774,7 @@ static inline loff_t fuse_round_up(loff_t off)
 }
 
 static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-                       loff_t offset)
+fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        DECLARE_COMPLETION_ONSTACK(wait);
        ssize_t ret = 0;
@@ -2800,15 +2791,15 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        inode = file->f_mapping->host;
        i_size = i_size_read(inode);
 
-       if ((rw == READ) && (offset > i_size))
+       if ((iov_iter_rw(iter) == READ) && (offset > i_size))
                return 0;
 
        /* optimization for short read */
-       if (async_dio && rw != WRITE && offset + count > i_size) {
+       if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
                if (offset >= i_size)
                        return 0;
-               count = min_t(loff_t, count, fuse_round_up(i_size - offset));
-               iov_iter_truncate(iter, count);
+               iov_iter_truncate(iter, fuse_round_up(i_size - offset));
+               count = iov_iter_count(iter);
        }
 
        io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
@@ -2819,7 +2810,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        io->bytes = -1;
        io->size = 0;
        io->offset = offset;
-       io->write = (rw == WRITE);
+       io->write = (iov_iter_rw(iter) == WRITE);
        io->err = 0;
        io->file = file;
        /*
@@ -2834,19 +2825,15 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
         * to wait on real async I/O requests, so we must submit this request
         * synchronously.
         */
-       if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
+       if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+           iov_iter_rw(iter) == WRITE)
                io->async = false;
 
        if (io->async && is_sync_kiocb(iocb))
                io->done = &wait;
 
-       if (rw == WRITE) {
-               ret = generic_write_checks(file, &pos, &count, 0);
-               if (!ret) {
-                       iov_iter_truncate(iter, count);
-                       ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
-               }
-
+       if (iov_iter_rw(iter) == WRITE) {
+               ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
                fuse_invalidate_attr(inode);
        } else {
                ret = __fuse_direct_read(io, iter, &pos);
@@ -2865,7 +2852,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
 
        kfree(io);
 
-       if (rw == WRITE) {
+       if (iov_iter_rw(iter) == WRITE) {
                if (ret > 0)
                        fuse_write_update_size(inode, pos);
                else if (ret < 0 && offset + count > i_size)
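One detail in the fuse hunk generalizes: a direct read that straddles EOF is shortened by clamping the iterator itself with iov_iter_truncate(), after which the remaining length is re-read with iov_iter_count() instead of being carried in a separate count variable. A small sketch under that assumption (fuse additionally rounds the limit up via its own helper):

static void example_clamp_read_to_eof(struct iov_iter *iter, loff_t offset,
				      loff_t i_size)
{
	/* Only reads are clamped; writes past EOF extend the file. */
	if (iov_iter_rw(iter) == READ && offset + iov_iter_count(iter) > i_size)
		iov_iter_truncate(iter, i_size > offset ? i_size - offset : 0);
}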
index a6e6990aea395e63c158f6105034d6b7c6b53a62..5551fea0afd7c53803314f524d4ceef84b59eaf3 100644 (file)
@@ -1016,13 +1016,12 @@ out:
 /**
  * gfs2_ok_for_dio - check that dio is valid on this file
  * @ip: The inode
- * @rw: READ or WRITE
  * @offset: The offset at which we are reading or writing
  *
  * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
  *          1 (to accept the i/o request)
  */
-static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
+static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
 {
        /*
         * Should we return an error here? I can't see that O_DIRECT for
@@ -1039,8 +1038,8 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
 
 
 
-static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
-                             struct iov_iter *iter, loff_t offset)
+static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1061,7 +1060,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
        rv = gfs2_glock_nq(&gh);
        if (rv)
                return rv;
-       rv = gfs2_ok_for_dio(ip, rw, offset);
+       rv = gfs2_ok_for_dio(ip, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */
 
@@ -1091,13 +1090,12 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                rv = filemap_write_and_wait_range(mapping, lstart, end);
                if (rv)
                        goto out;
-               if (rw == WRITE)
+               if (iov_iter_rw(iter) == WRITE)
                        truncate_inode_pages_range(mapping, lstart, end);
        }
 
-       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
-                                 iter, offset,
-                                 gfs2_get_block_direct, NULL, NULL, 0);
+       rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+                                 offset, gfs2_get_block_direct, NULL, NULL, 0);
 out:
        gfs2_glock_dq(&gh);
        gfs2_holder_uninit(&gh);
index 207eb4a8135e656126eb2a50a09e2abea5cd5779..31892871ea87052ad423630f08b0e0d58589d686 100644 (file)
@@ -709,7 +709,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
 
-       if (file->f_flags & O_APPEND) {
+       if (iocb->ki_flags & IOCB_APPEND) {
                struct gfs2_holder gh;
 
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
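The O_APPEND and O_DIRECT tests above now read iocb->ki_flags, which is filled in when the kiocb is initialised, so a later fcntl(F_SETFL) on the struct file no longer changes what an in-flight write sees. A sketch of the translation step, modelled on the small helper this series adds to include/linux/fs.h (the name here is illustrative); io_is_direct() and the IOCB_* flags appear elsewhere in the diff:

/* Snapshot the mutable file flags into per-I/O kiocb flags. */
static int example_iocb_flags(struct file *file)
{
	int flags = 0;

	if (file->f_flags & O_APPEND)
		flags |= IOCB_APPEND;
	if (io_is_direct(file))		/* O_DIRECT, or a DAX-backed inode */
		flags |= IOCB_DIRECT;
	return flags;
}

Consumers such as the write_iter paths then test iocb->ki_flags & IOCB_APPEND (or IOCB_DIRECT) instead of re-reading file->f_flags mid-write.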
index 9337065bcc6761280e5abb7b8078811172e3a76b..75fd5d873c196dfd76006e77ef25405c9359c224 100644 (file)
@@ -124,8 +124,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
        return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -133,13 +133,13 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index 5f86cadb0542c074b7015483303d3bdfefe3347f..a43811f909353354ca9bebd3e5d13212d30ae505 100644 (file)
@@ -122,8 +122,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
        return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
-               struct iov_iter *iter, loff_t offset)
+static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -131,14 +131,13 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, 
-                                hfsplus_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index 3197aed106148d8b0839b80405ecad125c14e7aa..070dc4b335449423091e67dd74c0f1c34617b041 100644 (file)
@@ -330,8 +330,8 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, jfs_get_block);
 }
 
-static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
-       struct iov_iter *iter, loff_t offset)
+static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -339,13 +339,13 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index 76fb76a0818bc274fc67b2d87b582db6690d62a6..ffab2e06e1472eca41449cbe5d8fe22baa219d29 100644 (file)
@@ -1585,7 +1585,7 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
                inode = path->dentry->d_inode;
        }
        err = -ENOENT;
-       if (!inode || d_is_negative(path->dentry))
+       if (d_is_negative(path->dentry))
                goto out_path_put;
 
        if (should_follow_link(path->dentry, follow)) {
@@ -2310,7 +2310,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
        mutex_unlock(&dir->d_inode->i_mutex);
 
 done:
-       if (!dentry->d_inode || d_is_negative(dentry)) {
+       if (d_is_negative(dentry)) {
                error = -ENOENT;
                dput(dentry);
                goto out;
@@ -3038,7 +3038,7 @@ retry_lookup:
 finish_lookup:
        /* we _can_ be in RCU mode here */
        error = -ENOENT;
-       if (!inode || d_is_negative(path->dentry)) {
+       if (d_is_negative(path->dentry)) {
                path_to_nameidata(path, nd);
                goto out;
        }
@@ -3077,7 +3077,7 @@ finish_open:
        error = -ENOTDIR;
        if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
                goto out;
-       if (!S_ISREG(nd->inode->i_mode))
+       if (!d_is_reg(nd->path.dentry))
                will_truncate = false;
 
        if (will_truncate) {
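The pathwalk hunks drop the separate ->d_inode tests in favour of the dentry type helpers, treating d_is_negative()/d_is_positive() and d_is_reg() as sufficient on their own. An illustrative fragment, not taken from the patch:

	/* Before: two loads that had to be checked together. */
	if (!dentry->d_inode || d_is_negative(dentry))
		return -ENOENT;

	/* After: the d_flags-based check stands alone... */
	if (d_is_negative(dentry))
		return -ENOENT;

	/* ...and type flags replace mode tests on ->d_inode. */
	if (!d_is_reg(dentry))
		will_truncate = false;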
index 479bf8db264e0cd0f253093f957a2ea32f0012a2..011324ce9df21181d6983f1fe81c659e1120a814 100644 (file)
@@ -170,20 +170,15 @@ ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        size_t already_written = 0;
-       loff_t pos = iocb->ki_pos;
-       size_t count = iov_iter_count(from);
        size_t bufsize;
        int errno;
        void *bouncebuffer;
+       off_t pos;
 
        ncp_dbg(1, "enter %pD2\n", file);
-       errno = generic_write_checks(file, &pos, &count, 0);
-       if (errno)
+       errno = generic_write_checks(iocb, from);
+       if (errno <= 0)
                return errno;
-       iov_iter_truncate(from, count);
-       
-       if (!count)
-               return 0;
 
        errno = ncp_make_open(inode, O_WRONLY);
        if (errno) {
@@ -201,10 +196,11 @@ ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                errno = -EIO;   /* -ENOMEM */
                goto outrel;
        }
+       pos = iocb->ki_pos;
        while (iov_iter_count(from)) {
                int written_this_time;
                size_t to_write = min_t(size_t,
-                                     bufsize - ((off_t)pos % bufsize),
+                                     bufsize - (pos % bufsize),
                                      iov_iter_count(from));
 
                if (copy_from_iter(bouncebuffer, to_write, from) != to_write) {
index c3929fb2ab26c2971e2e4a9f09e0f5a88387c144..682f65fe09b5177dcd64bcd2970bbfcef9824ac3 100644 (file)
@@ -240,7 +240,6 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
- * @rw: direction (read or write)
  * @iocb: target I/O control block
  * @iov: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
@@ -251,7 +250,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  * shunt off direct read and write requests before the VFS gets them,
  * so this method is only ever called for swap.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct inode *inode = iocb->ki_filp->f_mapping->host;
 
@@ -267,9 +266,9 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t
 #else
        VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
 
-       if (rw == READ)
+       if (iov_iter_rw(iter) == READ)
                return nfs_file_direct_read(iocb, iter, pos);
-       return nfs_file_direct_write(iocb, iter, pos);
+       return nfs_file_direct_write(iocb, iter);
 #endif /* CONFIG_NFS_SWAP */
 }
 
@@ -960,8 +959,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
-                               loff_t pos)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
 {
        ssize_t result = -EINVAL;
        struct file *file = iocb->ki_filp;
@@ -969,25 +967,16 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
-       loff_t end;
-       size_t count = iov_iter_count(iter);
-       end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-
-       nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
+       loff_t pos, end;
 
        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
-               file, count, (long long) pos);
+               file, iov_iter_count(iter), (long long) iocb->ki_pos);
 
-       result = generic_write_checks(file, &pos, &count, 0);
-       if (result)
-               goto out;
+       nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES,
+                     iov_iter_count(iter));
 
-       result = -EINVAL;
-       if ((ssize_t) count < 0)
-               goto out;
-       result = 0;
-       if (!count)
-               goto out;
+       pos = iocb->ki_pos;
+       end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;
 
        mutex_lock(&inode->i_mutex);
 
@@ -1002,7 +991,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
                        goto out_unlock;
        }
 
-       task_io_account_write(count);
+       task_io_account_write(iov_iter_count(iter));
 
        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
@@ -1010,7 +999,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
                goto out_unlock;
 
        dreq->inode = inode;
-       dreq->bytes_left = count;
+       dreq->bytes_left = iov_iter_count(iter);
        dreq->io_start = pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
@@ -1050,7 +1039,6 @@ out_release:
        nfs_direct_req_release(dreq);
 out_unlock:
        mutex_unlock(&inode->i_mutex);
-out:
        return result;
 }
 
index f6a3adedf0270b7edabb23511746a14bbe6d000c..c40e4363e746eab4014991f326005aa27721b460 100644 (file)
@@ -170,7 +170,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t result;
 
-       if (iocb->ki_filp->f_flags & O_DIRECT)
+       if (iocb->ki_flags & IOCB_DIRECT)
                return nfs_file_direct_read(iocb, to, iocb->ki_pos);
 
        dprintk("NFS: read(%pD2, %zu@%lu)\n",
@@ -674,17 +674,20 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        unsigned long written = 0;
        ssize_t result;
        size_t count = iov_iter_count(from);
-       loff_t pos = iocb->ki_pos;
 
        result = nfs_key_timeout_notify(file, inode);
        if (result)
                return result;
 
-       if (file->f_flags & O_DIRECT)
-               return nfs_file_direct_write(iocb, from, pos);
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               result = generic_write_checks(iocb, from);
+               if (result <= 0)
+                       return result;
+               return nfs_file_direct_write(iocb, from);
+       }
 
        dprintk("NFS: write(%pD2, %zu@%Ld)\n",
-               file, count, (long long) pos);
+               file, count, (long long) iocb->ki_pos);
 
        result = -EBUSY;
        if (IS_SWAPFILE(inode))
@@ -692,7 +695,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
        /*
         * O_APPEND implies that we must revalidate the file length.
         */
-       if (file->f_flags & O_APPEND) {
+       if (iocb->ki_flags & IOCB_APPEND) {
                result = nfs_revalidate_file_size(inode, file);
                if (result)
                        goto out;
index 568ecf0a880f1145aedb69a1d93241b5eba300e0..b8f5c63f77b2780b51d410ae4d3928ddf2bd1a76 100644 (file)
@@ -117,15 +117,15 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 
 static void nfs_readpage_release(struct nfs_page *req)
 {
-       struct inode *d_inode = req->wb_context->dentry->d_inode;
+       struct inode *inode = req->wb_context->dentry->d_inode;
 
-       dprintk("NFS: read done (%s/%llu %d@%lld)\n", d_inode->i_sb->s_id,
-               (unsigned long long)NFS_FILEID(d_inode), req->wb_bytes,
+       dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
+               (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
                (long long)req_offset(req));
 
        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
                if (PageUptodate(req->wb_page))
-                       nfs_readpage_to_fscache(d_inode, req->wb_page, 0);
+                       nfs_readpage_to_fscache(inode, req->wb_page, 0);
 
                unlock_page(req->wb_page);
        }
index ab4987bc637f8b084298086cf48f00e31bfe1298..36f057fa8aa3be305ec0bb155a2924e8a9088957 100644 (file)
@@ -305,8 +305,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
-               loff_t offset)
+nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -314,18 +313,17 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
        size_t count = iov_iter_count(iter);
        ssize_t size;
 
-       if (rw == WRITE)
+       if (iov_iter_rw(iter) == WRITE)
                return 0;
 
        /* Needs synchronization with the cleaner */
-       size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
-                                 nilfs_get_block);
+       size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && size < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index 840e95e3f1d271b63fc78049f827f459acf989ec..7bb487e663b478ed52633e43ecc94feb2a636a7f 100644 (file)
@@ -328,25 +328,25 @@ err_out:
        return err;
 }
 
-static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
-               size_t *count)
+static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
+               struct iov_iter *from)
 {
        loff_t pos;
        s64 end, ll;
        ssize_t err;
        unsigned long flags;
+       struct file *file = iocb->ki_filp;
        struct inode *vi = file_inode(file);
        ntfs_inode *base_ni, *ni = NTFS_I(vi);
        ntfs_volume *vol = ni->vol;
 
        ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
-                       "0x%llx, count 0x%lx.", vi->i_ino,
+                       "0x%llx, count 0x%zx.", vi->i_ino,
                        (unsigned)le32_to_cpu(ni->type),
-                       (unsigned long long)*ppos, (unsigned long)*count);
-       /* We can write back this queue in page reclaim. */
-       current->backing_dev_info = inode_to_bdi(vi);
-       err = generic_write_checks(file, ppos, count, S_ISBLK(vi->i_mode));
-       if (unlikely(err))
+                       (unsigned long long)iocb->ki_pos,
+                       iov_iter_count(from));
+       err = generic_write_checks(iocb, from);
+       if (unlikely(err <= 0))
                goto out;
        /*
         * All checks have passed.  Before we start doing any writing we want
@@ -379,8 +379,6 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
                err = -EOPNOTSUPP;
                goto out;
        }
-       if (*count == 0)
-               goto out;
        base_ni = ni;
        if (NInoAttr(ni))
                base_ni = ni->ext.base_ntfs_ino;
@@ -392,9 +390,9 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
         * cannot fail either so there is no need to check the return code.
         */
        file_update_time(file);
-       pos = *ppos;
+       pos = iocb->ki_pos;
        /* The first byte after the last cluster being written to. */
-       end = (pos + *count + vol->cluster_size_mask) &
+       end = (pos + iov_iter_count(from) + vol->cluster_size_mask) &
                        ~(u64)vol->cluster_size_mask;
        /*
         * If the write goes beyond the allocated size, extend the allocation
@@ -422,7 +420,7 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
                                                "partially extended.",
                                                vi->i_ino, (unsigned)
                                                le32_to_cpu(ni->type));
-                               *count = ll - pos;
+                               iov_iter_truncate(from, ll - pos);
                        }
                } else {
                        err = ll;
@@ -438,7 +436,7 @@ static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
                                                vi->i_ino, (unsigned)
                                                le32_to_cpu(ni->type),
                                                (int)-err);
-                               *count = ll - pos;
+                               iov_iter_truncate(from, ll - pos);
                        } else {
                                if (err != -ENOSPC)
                                        ntfs_error(vi->i_sb, "Cannot perform "
@@ -1929,61 +1927,37 @@ again:
        return written ? written : status;
 }
 
-/**
- * ntfs_file_write_iter_nolock - write data to a file
- * @iocb:      IO state structure (file, offset, etc.)
- * @from:      iov_iter with data to write
- *
- * Basically the same as __generic_file_write_iter() except that it ends
- * up calling ntfs_perform_write() instead of generic_perform_write() and that
- * O_DIRECT is not implemented.
- */
-static ssize_t ntfs_file_write_iter_nolock(struct kiocb *iocb,
-               struct iov_iter *from)
-{
-       struct file *file = iocb->ki_filp;
-       loff_t pos = iocb->ki_pos;
-       ssize_t written = 0;
-       ssize_t err;
-       size_t count = iov_iter_count(from);
-
-       err = ntfs_prepare_file_for_write(file, &pos, &count);
-       if (count && !err) {
-               iov_iter_truncate(from, count);
-               written = ntfs_perform_write(file, from, pos);
-               if (likely(written >= 0))
-                       iocb->ki_pos = pos + written;
-       }
-       current->backing_dev_info = NULL;
-       return written ? written : err;
-}
-
 /**
  * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock()
  * @iocb:      IO state structure
  * @from:      iov_iter with data to write
  *
  * Basically the same as generic_file_write_iter() except that it ends up
- * calling ntfs_file_write_iter_nolock() instead of
- * __generic_file_write_iter().
+ * calling ntfs_perform_write() instead of generic_perform_write() and that
+ * O_DIRECT is not implemented.
  */
 static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct inode *vi = file_inode(file);
-       ssize_t ret;
+       ssize_t written = 0;
+       ssize_t err;
 
        mutex_lock(&vi->i_mutex);
-       ret = ntfs_file_write_iter_nolock(iocb, from);
+       /* We can write back this queue in page reclaim. */
+       current->backing_dev_info = inode_to_bdi(vi);
+       err = ntfs_prepare_file_for_write(iocb, from);
+       if (iov_iter_count(from) && !err)
+               written = ntfs_perform_write(file, from, iocb->ki_pos);
+       current->backing_dev_info = NULL;
        mutex_unlock(&vi->i_mutex);
-       if (ret > 0) {
-               ssize_t err;
-
-               err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+       if (likely(written > 0)) {
+               err = generic_write_sync(file, iocb->ki_pos, written);
                if (err < 0)
-                       ret = err;
+                       written = 0;
        }
-       return ret;
+       iocb->ki_pos += written;
+       return written ? written : err;
 }
 
 /**
index 8d2bc840c288743e97df31fe99bce0b00e34a1b6..f906a250da6addcd0d2cdc796f383a312fe80224 100644 (file)
@@ -855,10 +855,9 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
                ocfs2_inode_unlock(inode, 1);
        }
 
-       written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
-                       iter, offset,
-                       ocfs2_direct_IO_get_blocks,
-                       ocfs2_dio_end_io, NULL, 0);
+       written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+                                      offset, ocfs2_direct_IO_get_blocks,
+                                      ocfs2_dio_end_io, NULL, 0);
        if (unlikely(written < 0)) {
                loff_t i_size = i_size_read(inode);
 
@@ -946,9 +945,7 @@ out:
        return ret;
 }
 
-static ssize_t ocfs2_direct_IO(int rw,
-                              struct kiocb *iocb,
-                              struct iov_iter *iter,
+static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                               loff_t offset)
 {
        struct file *file = iocb->ki_filp;
@@ -970,12 +967,11 @@ static ssize_t ocfs2_direct_IO(int rw,
        if (i_size_read(inode) <= offset && !full_coherency)
                return 0;
 
-       if (rw == READ)
-               return __blockdev_direct_IO(rw, iocb, inode,
-                                   inode->i_sb->s_bdev,
-                                   iter, offset,
-                                   ocfs2_direct_IO_get_blocks,
-                                   ocfs2_dio_end_io, NULL, 0);
+       if (iov_iter_rw(iter) == READ)
+               return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+                                           iter, offset,
+                                           ocfs2_direct_IO_get_blocks,
+                                           ocfs2_dio_end_io, NULL, 0);
        else
                return ocfs2_direct_IO_write(iocb, iter, offset);
 }
index 8c48e989bebab4af26b8b4f1e84ec3900eae6c50..913fc250d85a178b569f4d8ddd31b96a539208e0 100644 (file)
@@ -2106,7 +2106,7 @@ out:
 }
 
 static int ocfs2_prepare_inode_for_write(struct file *file,
-                                        loff_t *ppos,
+                                        loff_t pos,
                                         size_t count,
                                         int appending,
                                         int *direct_io,
@@ -2115,7 +2115,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
        int ret = 0, meta_level = 0;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
-       loff_t saved_pos = 0, end;
+       loff_t end;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int full_coherency = !(osb->s_mount_opt &
                OCFS2_MOUNT_COHERENCY_BUFFERED);
@@ -2155,23 +2155,16 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                        }
                }
 
-               /* work on a copy of ppos until we're sure that we won't have
-                * to recalculate it due to relocking. */
-               if (appending)
-                       saved_pos = i_size_read(inode);
-               else
-                       saved_pos = *ppos;
-
-               end = saved_pos + count;
+               end = pos + count;
 
-               ret = ocfs2_check_range_for_refcount(inode, saved_pos, count);
+               ret = ocfs2_check_range_for_refcount(inode, pos, count);
                if (ret == 1) {
                        ocfs2_inode_unlock(inode, meta_level);
                        meta_level = -1;
 
                        ret = ocfs2_prepare_inode_for_refcount(inode,
                                                               file,
-                                                              saved_pos,
+                                                              pos,
                                                               count,
                                                               &meta_level);
                        if (has_refcount)
@@ -2227,7 +2220,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                 * caller will have to retake some cluster
                 * locks and initiate the io as buffered.
                 */
-               ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
+               ret = ocfs2_check_range_for_holes(inode, pos, count);
                if (ret == 1) {
                        /*
                         * Fallback to old way if the feature bit is not set.
@@ -2242,12 +2235,9 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                break;
        }
 
-       if (appending)
-               *ppos = saved_pos;
-
 out_unlock:
        trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
-                                           saved_pos, appending, count,
+                                           pos, appending, count,
                                            direct_io, has_refcount);
 
        if (meta_level >= 0)
@@ -2260,19 +2250,20 @@ out:
 static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
                                    struct iov_iter *from)
 {
-       int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
+       int direct_io, appending, rw_level, have_alloc_sem  = 0;
        int can_do_direct, has_refcount = 0;
        ssize_t written = 0;
-       size_t count = iov_iter_count(from);
-       loff_t old_size, *ppos = &iocb->ki_pos;
+       ssize_t ret;
+       size_t count = iov_iter_count(from), orig_count;
+       loff_t old_size;
        u32 old_clusters;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
-       struct address_space *mapping = file->f_mapping;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int full_coherency = !(osb->s_mount_opt &
                               OCFS2_MOUNT_COHERENCY_BUFFERED);
        int unaligned_dio = 0;
+       int dropped_dio = 0;
 
        trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2283,8 +2274,8 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
        if (count == 0)
                return 0;
 
-       appending = file->f_flags & O_APPEND ? 1 : 0;
-       direct_io = file->f_flags & O_DIRECT ? 1 : 0;
+       appending = iocb->ki_flags & IOCB_APPEND ? 1 : 0;
+       direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
 
        mutex_lock(&inode->i_mutex);
 
@@ -2329,8 +2320,17 @@ relock:
                ocfs2_inode_unlock(inode, 1);
        }
 
+       orig_count = iov_iter_count(from);
+       ret = generic_write_checks(iocb, from);
+       if (ret <= 0) {
+               if (ret)
+                       mlog_errno(ret);
+               goto out;
+       }
+       count = ret;
+
        can_do_direct = direct_io;
-       ret = ocfs2_prepare_inode_for_write(file, ppos, count, appending,
+       ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, appending,
                                            &can_do_direct, &has_refcount);
        if (ret < 0) {
                mlog_errno(ret);
@@ -2338,7 +2338,7 @@ relock:
        }
 
        if (direct_io && !is_sync_kiocb(iocb))
-               unaligned_dio = ocfs2_is_io_unaligned(inode, count, *ppos);
+               unaligned_dio = ocfs2_is_io_unaligned(inode, count, iocb->ki_pos);
 
        /*
         * We can't complete the direct I/O as requested, fall back to
@@ -2351,6 +2351,9 @@ relock:
                rw_level = -1;
 
                direct_io = 0;
+               iocb->ki_flags &= ~IOCB_DIRECT;
+               iov_iter_reexpand(from, orig_count);
+               dropped_dio = 1;
                goto relock;
        }
 
@@ -2374,74 +2377,15 @@ relock:
        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb, rw_level);
 
-       ret = generic_write_checks(file, ppos, &count,
-                                  S_ISBLK(inode->i_mode));
-       if (ret)
-               goto out_dio;
-
-       iov_iter_truncate(from, count);
-       if (direct_io) {
-               loff_t endbyte;
-               ssize_t written_buffered;
-               written = generic_file_direct_write(iocb, from, *ppos);
-               if (written < 0 || written == count) {
-                       ret = written;
-                       goto out_dio;
-               }
-
-               /*
-                * for completing the rest of the request.
-                */
-               count -= written;
-               written_buffered = generic_perform_write(file, from, *ppos);
-               /*
-                * If generic_file_buffered_write() returned a synchronous error
-                * then we want to return the number of bytes which were
-                * direct-written, or the error code if that was zero. Note
-                * that this differs from normal direct-io semantics, which
-                * will return -EFOO even if some bytes were written.
-                */
-               if (written_buffered < 0) {
-                       ret = written_buffered;
-                       goto out_dio;
-               }
-
-               /* We need to ensure that the page cache pages are written to
-                * disk and invalidated to preserve the expected O_DIRECT
-                * semantics.
-                */
-               endbyte = *ppos + written_buffered - 1;
-               ret = filemap_write_and_wait_range(file->f_mapping, *ppos,
-                               endbyte);
-               if (ret == 0) {
-                       iocb->ki_pos = *ppos + written_buffered;
-                       written += written_buffered;
-                       invalidate_mapping_pages(mapping,
-                                       *ppos >> PAGE_CACHE_SHIFT,
-                                       endbyte >> PAGE_CACHE_SHIFT);
-               } else {
-                       /*
-                        * We don't know how much we wrote, so just return
-                        * the number of bytes which were direct-written
-                        */
-               }
-       } else {
-               current->backing_dev_info = inode_to_bdi(inode);
-               written = generic_perform_write(file, from, *ppos);
-               if (likely(written >= 0))
-                       iocb->ki_pos = *ppos + written;
-               current->backing_dev_info = NULL;
-       }
-
-out_dio:
+       written = __generic_file_write_iter(iocb, from);
        /* buffered aio wouldn't have proper lock coverage today */
-       BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
+       BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
 
        if (unlikely(written <= 0))
                goto no_sync;
 
-       if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
-           ((file->f_flags & O_DIRECT) && !direct_io)) {
+       if (((file->f_flags & O_DSYNC) && !direct_io) ||
+           IS_SYNC(inode) || dropped_dio) {
                ret = filemap_fdatawrite_range(file->f_mapping,
                                               iocb->ki_pos - written,
                                               iocb->ki_pos - 1);
@@ -2552,7 +2496,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
         * buffered reads protect themselves in ->readpage().  O_DIRECT reads
         * need locks to protect pending reads from racing with truncate.
         */
-       if (filp->f_flags & O_DIRECT) {
+       if (iocb->ki_flags & IOCB_DIRECT) {
                have_alloc_sem = 1;
                ocfs2_iocb_set_sem_locked(iocb);
 
@@ -2586,7 +2530,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
        trace_generic_file_aio_read_ret(ret);
 
        /* buffered aio wouldn't have proper lock coverage today */
-       BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
+       BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
 
        /* see ocfs2_file_write_iter */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
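[Editor's note] The filemap_fdatawrite_range() call retained above flushes exactly the byte range the write covered whenever the file is O_DSYNC or the direct write was dropped to buffered I/O (dropped_dio). A minimal sketch of that range computation, assuming ki_pos has already been advanced past the last byte written (helper name is made up, not ocfs2 code):

static ssize_t flush_written_range(struct kiocb *iocb, ssize_t written)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        loff_t start = iocb->ki_pos - written;  /* first byte written */
        loff_t end   = iocb->ki_pos - 1;        /* last byte written  */
        int err;

        err = filemap_fdatawrite_range(mapping, start, end);
        return err ? err : written;
}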
index 45d583c33879e18d89b3fc06d48bbd05a30b6ab3..819ef3faf1bb710678175de06a13f4dcf6e90d62 100644 (file)
@@ -477,7 +477,8 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
 
        ret = filp->f_op->write_iter(&kiocb, &iter);
        BUG_ON(ret == -EIOCBQUEUED);
-       *ppos = kiocb.ki_pos;
+       if (ret > 0)
+               *ppos = kiocb.ki_pos;
        return ret;
 }
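[Editor's note] The hunk above only shows the tail of new_sync_write(); for context, the bridge is roughly the following sketch (reconstructed from the surrounding kernel of this era, not copied from the patch): build a one-shot kiocb and iov_iter, call ->write_iter(), and publish the new position only when the write made progress.

static ssize_t sync_write_sketch(struct file *filp, const char __user *buf,
                                 size_t len, loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
        struct kiocb kiocb;
        struct iov_iter iter;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
        iov_iter_init(&iter, WRITE, &iov, 1, len);

        ret = filp->f_op->write_iter(&kiocb, &iter);
        BUG_ON(ret == -EIOCBQUEUED);
        if (ret > 0)            /* don't clobber *ppos on error or no progress */
                *ppos = kiocb.ki_pos;
        return ret;
}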
 
index 9312b7842e036f64ac02135102b445f0769e7702..742242b60972671c50926f9a0dd0db48c4e7aeca 100644 (file)
@@ -3278,22 +3278,22 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
  * We thank Mingming Cao for helping us understand in great detail what
  * to do in this section of the code.
  */
-static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
-                                 struct iov_iter *iter, loff_t offset)
+static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+       ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                 reiserfs_get_blocks_direct_io);
 
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
-       if (unlikely((rw & WRITE) && ret < 0)) {
+       if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;
 
index f77f7681288f790dc7b8b73fd4863f5e4a88688b..5dadad9960b9e25cb4698a184005ad1c82073535 100644 (file)
@@ -99,8 +99,7 @@ static int udf_adinicb_write_begin(struct file *file,
        return 0;
 }
 
-static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
-                                    struct iov_iter *iter,
+static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                                     loff_t offset)
 {
        /* Fallback to buffered I/O. */
@@ -120,21 +119,21 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ssize_t retval;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
-       int err, pos;
-       size_t count = iov_iter_count(from);
        struct udf_inode_info *iinfo = UDF_I(inode);
+       int err;
 
        mutex_lock(&inode->i_mutex);
+
+       retval = generic_write_checks(iocb, from);
+       if (retval <= 0)
+               goto out;
+
        down_write(&iinfo->i_data_sem);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
-               if (file->f_flags & O_APPEND)
-                       pos = inode->i_size;
-               else
-                       pos = iocb->ki_pos;
+               loff_t end = iocb->ki_pos + iov_iter_count(from);
 
                if (inode->i_sb->s_blocksize <
-                               (udf_file_entry_alloc_offset(inode) +
-                                               pos + count)) {
+                               (udf_file_entry_alloc_offset(inode) + end)) {
                        err = udf_expand_file_adinicb(inode);
                        if (err) {
                                mutex_unlock(&inode->i_mutex);
@@ -142,16 +141,14 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                                return err;
                        }
                } else {
-                       if (pos + count > inode->i_size)
-                               iinfo->i_lenAlloc = pos + count;
-                       else
-                               iinfo->i_lenAlloc = inode->i_size;
+                       iinfo->i_lenAlloc = max(end, inode->i_size);
                        up_write(&iinfo->i_data_sem);
                }
        } else
                up_write(&iinfo->i_data_sem);
 
        retval = __generic_file_write_iter(iocb, from);
+out:
        mutex_unlock(&inode->i_mutex);
 
        if (retval > 0) {
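[Editor's note] The point of the reordering in udf_file_write_iter() is that generic_write_checks() now folds O_APPEND into iocb->ki_pos and clamps the iterator before the in-ICB sizing decision is made, so the final extent of the write can be computed directly. A hedged sketch of that calling pattern for a hypothetical filesystem ("foofs"; the inline-data helpers are invented):

static ssize_t foofs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        mutex_lock(&inode->i_mutex);
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;       /* error, or nothing left to write */

        /* O_APPEND is already reflected in ki_pos and the request is already
         * clamped, so this really is the last byte the write can touch. */
        if (iocb->ki_pos + iov_iter_count(from) > foofs_inline_limit(inode)) {
                ret = foofs_convert_inline_to_blocks(inode);  /* hypothetical */
                if (ret)
                        goto out;
        }
        ret = __generic_file_write_iter(iocb, from);
out:
        mutex_unlock(&inode->i_mutex);
        return ret;
}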
index 9e3d780e5efffc373cb4055f8bcbfe2469b3134d..6afac3d561ac81f6f5861f86a0dc4eb2a7d1fca2 100644 (file)
@@ -214,8 +214,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
        return ret;
 }
 
-static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
-                            struct iov_iter *iter,
+static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                             loff_t offset)
 {
        struct file *file = iocb->ki_filp;
@@ -224,8 +223,8 @@ static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
        size_t count = iov_iter_count(iter);
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
-       if (unlikely(ret < 0 && (rw & WRITE)))
+       ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
+       if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
                udf_write_failed(mapping, offset + count);
        return ret;
 }
index 4f8cdc59bc38154b45f1adfd69c0a371df4394e3..1d8eef9cf0f509ac91f86f97ae7fe608e44bc849 100644 (file)
@@ -1495,7 +1495,6 @@ xfs_end_io_direct_write(
 
 STATIC ssize_t
 xfs_vm_direct_IO(
-       int                     rw,
        struct kiocb            *iocb,
        struct iov_iter         *iter,
        loff_t                  offset)
@@ -1503,15 +1502,14 @@ xfs_vm_direct_IO(
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
 
-       if (rw & WRITE) {
-               return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-                                           offset, xfs_get_blocks_direct,
+       if (iov_iter_rw(iter) == WRITE) {
+               return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+                                           xfs_get_blocks_direct,
                                            xfs_end_io_direct_write, NULL,
                                            DIO_ASYNC_EXTEND);
        }
-       return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-                                   offset, xfs_get_blocks_direct,
-                                   NULL, NULL, 0);
+       return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+                                   xfs_get_blocks_direct, NULL, NULL, 0);
 }
 
 /*
index 44856c3b9617c6f658f6ab9b05b5f08be23f2a01..1f12ad0a8585b3d0f0788cfe5b88b6ab662a1547 100644 (file)
@@ -279,7 +279,7 @@ xfs_file_read_iter(
 
        XFS_STATS_INC(xs_read_calls);
 
-       if (unlikely(file->f_flags & O_DIRECT))
+       if (unlikely(iocb->ki_flags & IOCB_DIRECT))
                ioflags |= XFS_IO_ISDIRECT;
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= XFS_IO_INVIS;
@@ -544,18 +544,19 @@ xfs_zero_eof(
  */
 STATIC ssize_t
 xfs_file_aio_write_checks(
-       struct file             *file,
-       loff_t                  *pos,
-       size_t                  *count,
+       struct kiocb            *iocb,
+       struct iov_iter         *from,
        int                     *iolock)
 {
+       struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
-       int                     error = 0;
+       ssize_t                 error = 0;
+       size_t                  count = iov_iter_count(from);
 
 restart:
-       error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
-       if (error)
+       error = generic_write_checks(iocb, from);
+       if (error <= 0)
                return error;
 
        error = xfs_break_layouts(inode, iolock);
@@ -569,16 +570,17 @@ restart:
         * iolock shared, we need to update it to exclusive which implies
         * having to redo all checks before.
         */
-       if (*pos > i_size_read(inode)) {
+       if (iocb->ki_pos > i_size_read(inode)) {
                bool    zero = false;
 
                if (*iolock == XFS_IOLOCK_SHARED) {
                        xfs_rw_iunlock(ip, *iolock);
                        *iolock = XFS_IOLOCK_EXCL;
                        xfs_rw_ilock(ip, *iolock);
+                       iov_iter_reexpand(from, count);
                        goto restart;
                }
-               error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
+               error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
                if (error)
                        return error;
        }
@@ -678,10 +680,11 @@ xfs_file_dio_aio_write(
                xfs_rw_ilock(ip, iolock);
        }
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
+       ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;
-       iov_iter_truncate(from, count);
+       count = iov_iter_count(from);
+       pos = iocb->ki_pos;
 
        if (mapping->nrpages) {
                ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
@@ -734,24 +737,22 @@ xfs_file_buffered_aio_write(
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
-       loff_t                  pos = iocb->ki_pos;
-       size_t                  count = iov_iter_count(from);
 
        xfs_rw_ilock(ip, iolock);
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
+       ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;
 
-       iov_iter_truncate(from, count);
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
 
 write_retry:
-       trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_perform_write(file, from, pos);
+       trace_xfs_file_buffered_write(ip, iov_iter_count(from),
+                                     iocb->ki_pos, 0);
+       ret = generic_perform_write(file, from, iocb->ki_pos);
        if (likely(ret >= 0))
-               iocb->ki_pos = pos + ret;
+               iocb->ki_pos += ret;
 
        /*
         * If we hit a space limit, try to free up some lingering preallocated
@@ -803,7 +804,7 @@ xfs_file_write_iter(
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;
 
-       if (unlikely(file->f_flags & O_DIRECT))
+       if (unlikely(iocb->ki_flags & IOCB_DIRECT))
                ret = xfs_file_dio_aio_write(iocb, from);
        else
                ret = xfs_file_buffered_aio_write(iocb, from);
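[Editor's note] One subtlety in xfs_file_aio_write_checks() above: generic_write_checks() may shorten the iterator, so when the checks must be redone after upgrading the iolock, the iterator is first restored to its original length with iov_iter_reexpand(). A minimal sketch of that retry pattern (the lock-state names and helpers are hypothetical):

static ssize_t write_checks_sketch(struct kiocb *iocb, struct iov_iter *from,
                                   int *iolock)
{
        size_t count = iov_iter_count(from);    /* remember the full request */
        ssize_t ret;

restart:
        ret = generic_write_checks(iocb, from); /* may truncate 'from' */
        if (ret <= 0)
                return ret;

        if (iocb->ki_pos > i_size_read(file_inode(iocb->ki_filp)) &&
            *iolock == SHARED_IOLOCK) {         /* hypothetical lock state */
                upgrade_to_exclusive(iolock);   /* hypothetical helper */
                iov_iter_reexpand(from, count); /* undo any truncation */
                goto restart;
        }
        return ret;
}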
index d8358799c59411f5b715e9dfbb139b94b8dbca8f..df334cbacc6d0b8c2702c28e13e9a804314bb754 100644 (file)
@@ -404,26 +404,11 @@ static inline bool d_mountpoint(const struct dentry *dentry)
 /*
  * Directory cache entry type accessor functions.
  */
-static inline void __d_set_type(struct dentry *dentry, unsigned type)
-{
-       dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
-}
-
-static inline void __d_clear_type(struct dentry *dentry)
-{
-       __d_set_type(dentry, DCACHE_MISS_TYPE);
-}
-
-static inline void d_set_type(struct dentry *dentry, unsigned type)
-{
-       spin_lock(&dentry->d_lock);
-       __d_set_type(dentry, type);
-       spin_unlock(&dentry->d_lock);
-}
-
 static inline unsigned __d_entry_type(const struct dentry *dentry)
 {
-       return dentry->d_flags & DCACHE_ENTRY_TYPE;
+       unsigned type = READ_ONCE(dentry->d_flags);
+       smp_rmb();
+       return type & DCACHE_ENTRY_TYPE;
 }
 
 static inline bool d_is_miss(const struct dentry *dentry)
@@ -482,6 +467,44 @@ static inline bool d_is_positive(const struct dentry *dentry)
        return !d_is_negative(dentry);
 }
 
+/**
+ * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents either an absent name or a name that
+ * doesn't map to an inode (ie. ->d_inode is NULL).  The dentry could represent
+ * a true miss, a whiteout that isn't represented by a 0,0 chardev or a
+ * fallthrough marker in an opaque directory.
+ *
+ * Note!  (1) This should be used *only* by a filesystem to examine its own
+ * dentries.  It should not be used to look at some other filesystem's
+ * dentries.  (2) It should also be used in combination with d_inode() to get
+ * the inode.  (3) The dentry may have something attached to ->d_lower and the
+ * type field of the flags may be set to something other than miss or whiteout.
+ */
+static inline bool d_really_is_negative(const struct dentry *dentry)
+{
+       return dentry->d_inode == NULL;
+}
+
+/**
+ * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents a name that maps to an inode
+ * (ie. ->d_inode is not NULL).  The dentry might still represent a whiteout if
+ * that is represented on medium as a 0,0 chardev.
+ *
+ * Note!  (1) This should be used *only* by a filesystem to examine its own
+ * dentries.  It should not be used to look at some other filesystem's
+ * dentries.  (2) It should also be used in combination with d_inode() to get
+ * the inode.
+ */
+static inline bool d_really_is_positive(const struct dentry *dentry)
+{
+       return dentry->d_inode != NULL;
+}
+
 extern void d_set_fallthru(struct dentry *dentry);
 
 static inline bool d_is_fallthru(const struct dentry *dentry)
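[Editor's note] The new __d_entry_type() pairs a READ_ONCE() of d_flags with an smp_rmb(), matching a write-side barrier in fs/dcache.c (not shown here), so a dentry observed as positive via its type bits also has its inode pointer visible. The d_really_is_*() helpers let a filesystem ask about its own ->d_inode directly, while VFS-facing code keeps using d_is_negative()/d_is_positive(), which also honour whiteouts and fallthrough markers. A hedged sketch of that split, with an invented filesystem helper:

/* Hypothetical foofs code looking at its *own* dentry: ask whether an inode
 * is actually attached, independent of whiteout/fallthrough type bits. */
static int foofs_victim_check(struct dentry *victim)
{
        if (d_really_is_negative(victim))       /* ->d_inode == NULL */
                return -ENOENT;
        return foofs_permission_check(d_inode(victim)); /* hypothetical */
}

/* Generic, pathwalk-style code keeps the type-bit based helpers. */
static bool looks_absent(const struct dentry *dentry)
{
        return d_is_negative(dentry);           /* miss or whiteout */
}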
index f4fc60727b8da5a1c0e5e0dc1085013949a8c2d8..f4d63544a791a0d17e5d4b0167781a56f1e8adba 100644 (file)
@@ -315,6 +315,8 @@ struct address_space;
 struct writeback_control;
 
 #define IOCB_EVENTFD           (1 << 0)
+#define IOCB_APPEND            (1 << 1)
+#define IOCB_DIRECT            (1 << 2)
 
 struct kiocb {
        struct file             *ki_filp;
@@ -329,10 +331,13 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
        return kiocb->ki_complete == NULL;
 }
 
+static inline int iocb_flags(struct file *file);
+
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
        *kiocb = (struct kiocb) {
                .ki_filp = filp,
+               .ki_flags = iocb_flags(filp),
        };
 }
 
@@ -383,7 +388,7 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, gfp_t);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset);
+       ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
        /*
         * migrate the contents of a page to the specified target. If
         * migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -2566,7 +2571,7 @@ extern int sb_min_blocksize(struct super_block *, int);
 
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
@@ -2609,8 +2614,8 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
 extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
 
-ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *,
-               loff_t, get_block_t, dio_iodone_t, int flags);
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+                 get_block_t, dio_iodone_t, int flags);
 int dax_clear_blocks(struct inode *, sector_t block, long size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
@@ -2635,16 +2640,18 @@ enum {
 
 void dio_end_io(struct bio *bio, int error);
 
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-       get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags);
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+                            struct block_device *bdev, struct iov_iter *iter,
+                            loff_t offset, get_block_t get_block,
+                            dio_iodone_t end_io, dio_submit_t submit_io,
+                            int flags);
 
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-               struct inode *inode, struct iov_iter *iter, loff_t offset,
-               get_block_t get_block)
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+                                        struct inode *inode,
+                                        struct iov_iter *iter, loff_t offset,
+                                        get_block_t get_block)
 {
-       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+       return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
                                    offset, get_block, NULL, NULL,
                                    DIO_LOCKING | DIO_SKIP_HOLES);
 }
@@ -2777,6 +2784,16 @@ static inline bool io_is_direct(struct file *filp)
        return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
 }
 
+static inline int iocb_flags(struct file *file)
+{
+       int res = 0;
+       if (file->f_flags & O_APPEND)
+               res |= IOCB_APPEND;
+       if (io_is_direct(file))
+               res |= IOCB_DIRECT;
+       return res;
+}
+
 static inline ino_t parent_ino(struct dentry *dentry)
 {
        ino_t res;
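[Editor's note] iocb_flags() copies the O_APPEND state and the O_DIRECT/DAX state into ki_flags when the kiocb is set up, so code further down the write path tests a stable snapshot instead of file->f_flags, which another thread can flip via fcntl(F_SETFL) while the I/O is in flight. A sketch of the consumer side for a hypothetical filesystem (both write helpers are invented):

/* Hypothetical foofs: pick the I/O path once, from the snapshot taken at
 * init_sync_kiocb()/aio-submission time, not from file->f_flags. */
static ssize_t foofs_pick_write_path(struct kiocb *iocb, struct iov_iter *from)
{
        if (iocb->ki_flags & IOCB_DIRECT)
                return foofs_dio_write(iocb, from);      /* hypothetical */
        return foofs_buffered_write(iocb, from);         /* hypothetical */
}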
index b01ccf371fdcaf229f07bddbd2f12f9bbb5c53b0..410abd172febee648d8f63cab2e00592773ddaed 100644 (file)
@@ -447,13 +447,12 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
                        struct iov_iter *iter,
                        loff_t pos);
 extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
-                       struct iov_iter *iter,
-                       loff_t pos);
+                       struct iov_iter *iter);
 
 /*
  * linux/fs/nfs/dir.c
index 15f11fb9fff6feb5197f9501777e018f172dda4b..8b01e1c3c6146623759f122b72ff6b3b869a0617 100644 (file)
@@ -111,6 +111,14 @@ static inline bool iter_is_iovec(struct iov_iter *i)
        return !(i->type & (ITER_BVEC | ITER_KVEC));
 }
 
+/*
+ * Get one of READ or WRITE out of iter->type without any other flags OR'd in
+ * with it.
+ *
+ * The ?: is just for type safety.
+ */
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+
 /*
  * Cap the iov_iter by given limit; note that the second argument is
  * *not* the new size - it's upper limit for such.  Passing it a value
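[Editor's note] With iov_iter_rw() the direction travels with the iterator, which is what lets the explicit 'rw' argument disappear from ->direct_IO() and the blockdev_direct_IO() helpers in the reiserfs/udf/xfs hunks above. A sketch of a converted ->direct_IO() for a hypothetical filesystem (get_block and failure-trim helpers are invented):

static ssize_t foofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                               loff_t offset)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        size_t count = iov_iter_count(iter);    /* before the iter advances */
        ssize_t ret;

        ret = blockdev_direct_IO(iocb, inode, iter, offset,
                                 foofs_get_block);       /* hypothetical */
        if (ret < 0 && iov_iter_rw(iter) == WRITE)
                foofs_write_failed(inode, offset + count); /* hypothetical */
        return ret;
}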
index 12548d03c11de9e3f114897ddc031dbb525f550c..6bf5e42d560a46eea8e4916bd017d855033b8d65 100644 (file)
@@ -1693,7 +1693,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        loff_t *ppos = &iocb->ki_pos;
        loff_t pos = *ppos;
 
-       if (io_is_direct(file)) {
+       if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = file->f_mapping;
                struct inode *inode = mapping->host;
                size_t count = iov_iter_count(iter);
@@ -1706,7 +1706,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                                        pos + count - 1);
                if (!retval) {
                        struct iov_iter data = *iter;
-                       retval = mapping->a_ops->direct_IO(READ, iocb, &data, pos);
+                       retval = mapping->a_ops->direct_IO(iocb, &data, pos);
                }
 
                if (retval > 0) {
@@ -2259,41 +2259,38 @@ EXPORT_SYMBOL(read_cache_page_gfp);
  * Returns appropriate error code that caller should return or
  * zero in case that write should be allowed.
  */
-inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
+inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        unsigned long limit = rlimit(RLIMIT_FSIZE);
+       loff_t pos;
 
-        if (unlikely(*pos < 0))
-                return -EINVAL;
+       if (!iov_iter_count(from))
+               return 0;
 
-       if (!isblk) {
-               /* FIXME: this is for backwards compatibility with 2.4 */
-               if (file->f_flags & O_APPEND)
-                        *pos = i_size_read(inode);
+       /* FIXME: this is for backwards compatibility with 2.4 */
+       if (iocb->ki_flags & IOCB_APPEND)
+               iocb->ki_pos = i_size_read(inode);
 
-               if (limit != RLIM_INFINITY) {
-                       if (*pos >= limit) {
-                               send_sig(SIGXFSZ, current, 0);
-                               return -EFBIG;
-                       }
-                       if (*count > limit - (typeof(limit))*pos) {
-                               *count = limit - (typeof(limit))*pos;
-                       }
+       pos = iocb->ki_pos;
+
+       if (limit != RLIM_INFINITY) {
+               if (iocb->ki_pos >= limit) {
+                       send_sig(SIGXFSZ, current, 0);
+                       return -EFBIG;
                }
+               iov_iter_truncate(from, limit - (unsigned long)pos);
        }
 
        /*
         * LFS rule
         */
-       if (unlikely(*pos + *count > MAX_NON_LFS &&
+       if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
                                !(file->f_flags & O_LARGEFILE))) {
-               if (*pos >= MAX_NON_LFS) {
+               if (pos >= MAX_NON_LFS)
                        return -EFBIG;
-               }
-               if (*count > MAX_NON_LFS - (unsigned long)*pos) {
-                       *count = MAX_NON_LFS - (unsigned long)*pos;
-               }
+               iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
        }
 
        /*
@@ -2303,34 +2300,11 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
         * exceeded without writing data we send a signal and return EFBIG.
         * Linus frestrict idea will clean these up nicely..
         */
-       if (likely(!isblk)) {
-               if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
-                       if (*count || *pos > inode->i_sb->s_maxbytes) {
-                               return -EFBIG;
-                       }
-                       /* zero-length writes at ->s_maxbytes are OK */
-               }
+       if (unlikely(pos >= inode->i_sb->s_maxbytes))
+               return -EFBIG;
 
-               if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
-                       *count = inode->i_sb->s_maxbytes - *pos;
-       } else {
-#ifdef CONFIG_BLOCK
-               loff_t isize;
-               if (bdev_read_only(I_BDEV(inode)))
-                       return -EPERM;
-               isize = i_size_read(inode);
-               if (*pos >= isize) {
-                       if (*count || *pos > isize)
-                               return -ENOSPC;
-               }
-
-               if (*pos + *count > isize)
-                       *count = isize - *pos;
-#else
-               return -EPERM;
-#endif
-       }
-       return 0;
+       iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
+       return iov_iter_count(from);
 }
 EXPORT_SYMBOL(generic_write_checks);
 
@@ -2394,7 +2368,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
        }
 
        data = *from;
-       written = mapping->a_ops->direct_IO(WRITE, iocb, &data, pos);
+       written = mapping->a_ops->direct_IO(iocb, &data, pos);
 
        /*
         * Finally, try again to invalidate clean pages which might have been
@@ -2556,23 +2530,12 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
        struct inode    *inode = mapping->host;
-       loff_t          pos = iocb->ki_pos;
        ssize_t         written = 0;
        ssize_t         err;
        ssize_t         status;
-       size_t          count = iov_iter_count(from);
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-       if (err)
-               goto out;
-
-       if (count == 0)
-               goto out;
-
-       iov_iter_truncate(from, count);
-
        err = file_remove_suid(file);
        if (err)
                goto out;
@@ -2581,10 +2544,10 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (err)
                goto out;
 
-       if (io_is_direct(file)) {
-               loff_t endbyte;
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               loff_t pos, endbyte;
 
-               written = generic_file_direct_write(iocb, from, pos);
+               written = generic_file_direct_write(iocb, from, iocb->ki_pos);
                /*
                 * If the write stopped short of completing, fall back to
                 * buffered writes.  Some filesystems do this for writes to
@@ -2592,13 +2555,10 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                 * not succeed (even if it did, DAX does not handle dirty
                 * page-cache pages correctly).
                 */
-               if (written < 0 || written == count || IS_DAX(inode))
+               if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
                        goto out;
 
-               pos += written;
-               count -= written;
-
-               status = generic_perform_write(file, from, pos);
+               status = generic_perform_write(file, from, pos = iocb->ki_pos);
                /*
                 * If generic_perform_write() returned a synchronous error
                 * then we want to return the number of bytes which were
@@ -2610,15 +2570,15 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                        err = status;
                        goto out;
                }
-               iocb->ki_pos = pos + status;
                /*
                 * We need to ensure that the page cache pages are written to
                 * disk and invalidated to preserve the expected O_DIRECT
                 * semantics.
                 */
                endbyte = pos + status - 1;
-               err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
+               err = filemap_write_and_wait_range(mapping, pos, endbyte);
                if (err == 0) {
+                       iocb->ki_pos = endbyte + 1;
                        written += status;
                        invalidate_mapping_pages(mapping,
                                                 pos >> PAGE_CACHE_SHIFT,
@@ -2630,9 +2590,9 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                         */
                }
        } else {
-               written = generic_perform_write(file, from, pos);
-               if (likely(written >= 0))
-                       iocb->ki_pos = pos + written;
+               written = generic_perform_write(file, from, iocb->ki_pos);
+               if (likely(written > 0))
+                       iocb->ki_pos += written;
        }
 out:
        current->backing_dev_info = NULL;
@@ -2656,7 +2616,9 @@ ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ssize_t ret;
 
        mutex_lock(&inode->i_mutex);
-       ret = __generic_file_write_iter(iocb, from);
+       ret = generic_write_checks(iocb, from);
+       if (ret > 0)
+               ret = __generic_file_write_iter(iocb, from);
        mutex_unlock(&inode->i_mutex);
 
        if (ret > 0) {
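[Editor's note] The rewritten generic_write_checks() clamps the iterator against RLIMIT_FSIZE, the non-LFS limit and s_maxbytes, and returns the (possibly shortened) count, 0 for a no-op, or a negative error. The RLIMIT_FSIZE behaviour is easy to observe from userspace; assuming a regular file on a filesystem using the generic path, the first write below is shortened to the limit and the second fails with EFBIG (SIGXFSZ is ignored so the process survives to print):

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
        struct rlimit rl = { .rlim_cur = 100, .rlim_max = 100 };
        char buf[200];
        ssize_t n1, n2;
        int fd;

        memset(buf, 'x', sizeof(buf));
        signal(SIGXFSZ, SIG_IGN);       /* get EFBIG instead of being killed */
        setrlimit(RLIMIT_FSIZE, &rl);

        fd = open("rlimit-demo", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0)
                return 1;

        n1 = write(fd, buf, sizeof(buf));       /* clamped: expect 100 */
        n2 = write(fd, buf, sizeof(buf));       /* past limit: expect -1 */
        printf("first write:  %zd\n", n1);
        printf("second write: %zd (errno=%d)\n", n2, errno);   /* EFBIG */
        close(fd);
        return 0;
}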
index a96c8562d83567466b6633dd169c84ff63418e66..6424869e275e2aa2d09debfa791ea08302ac68be 100644 (file)
@@ -277,9 +277,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 
                set_page_writeback(page);
                unlock_page(page);
-               ret = mapping->a_ops->direct_IO(ITER_BVEC | WRITE,
-                                               &kiocb, &from,
-                                               kiocb.ki_pos);
+               ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
                        ret = 0;
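[Editor's note] The swap-out caller already describes the whole operation through the kiocb and the ITER_BVEC iterator, which is why the old "ITER_BVEC | WRITE" first argument to ->direct_IO() could go away. Roughly, the setup that precedes this call looks like the following (a paraphrase of the surrounding __swap_writepage(), not part of this hunk):

struct swap_info_struct *sis = page_swap_info(page);
struct file *swap_file = sis->swap_file;
struct address_space *mapping = swap_file->f_mapping;
struct bio_vec bv = {
        .bv_page   = page,
        .bv_len    = PAGE_SIZE,
        .bv_offset = 0
};
struct iov_iter from;
struct kiocb kiocb;

iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
init_sync_kiocb(&kiocb, swap_file);
kiocb.ki_pos = page_file_offset(page);
/* direction and payload both come from 'from'; the position from the kiocb */
ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos);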