2 * linux/fs/read_write.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
7 #include <linux/slab.h>
8 #include <linux/stat.h>
9 #include <linux/fcntl.h>
10 #include <linux/file.h>
11 #include <linux/uio.h>
12 #include <linux/fsnotify.h>
13 #include <linux/security.h>
14 #include <linux/export.h>
15 #include <linux/syscalls.h>
16 #include <linux/pagemap.h>
17 #include <linux/splice.h>
18 #include <linux/compat.h>
19 #include <linux/mount.h>
23 #include <linux/uaccess.h>
24 #include <asm/unistd.h>
/* Default file_operations for simple read-only, page-cache-backed files:
 * generic seek, iterator-based read, read-only mmap and splice; no write
 * methods are provided. */
26 const struct file_operations generic_ro_fops = {
27 .llseek = generic_file_llseek,
28 .read_iter = generic_file_read_iter,
29 .mmap = generic_file_readonly_mmap,
30 .splice_read = generic_file_splice_read,
33 EXPORT_SYMBOL(generic_ro_fops);
/* Non-zero when the file was opened with FMODE_UNSIGNED_OFFSET, i.e. its
 * offsets are treated as unsigned and negative loff_t values are legal. */
35 static inline int unsigned_offsets(struct file *file)
37 return file->f_mode & FMODE_UNSIGNED_OFFSET;
41 * vfs_setpos - update the file offset for lseek
42 * @file: file structure in question
43 * @offset: file offset to seek to
44 * @maxsize: maximum file size
46 * This is a low-level filesystem helper for updating the file offset to
47 * the value specified by @offset if the given offset is valid and it is
48 * not equal to the current file offset.
50 * Return the specified offset on success and -EINVAL on invalid offset.
/* Rejects negative offsets unless the file allows unsigned offsets, and only
 * stores f_pos when the value actually changed (body partially elided). */
52 loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
54 if (offset < 0 && !unsigned_offsets(file))
59 if (offset != file->f_pos) {
65 EXPORT_SYMBOL(vfs_setpos);
68 * generic_file_llseek_size - generic llseek implementation for regular files
69 * @file: file structure to seek on
70 * @offset: file offset to seek to
71 * @whence: type of seek
72 * @size: max size of this file in file system
73 * @eof: offset used for SEEK_END position
75 * This is a variant of generic_file_llseek that allows passing in a custom
76 * maximum file size and a custom EOF position, for e.g. hashed directories
79 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
80 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
81 * read/writes behave like SEEK_SET against seeks.
/* Core llseek engine: SEEK_SET/END go straight to vfs_setpos(); SEEK_CUR
 * takes f_lock to avoid a read/modify/write race on f_pos; SEEK_DATA /
 * SEEK_HOLE treat the whole file as data with a virtual hole at EOF.
 * (Several intervening lines are elided in this excerpt.) */
84 generic_file_llseek_size(struct file *file, loff_t offset, int whence,
85 loff_t maxsize, loff_t eof)
93 * Here we special-case the lseek(fd, 0, SEEK_CUR)
94 * position-querying operation. Avoid rewriting the "same"
95 * f_pos value back to the file because a concurrent read(),
96 * write() or lseek() might have altered it
101 * f_lock protects against read/modify/write race with other
102 * SEEK_CURs. Note that parallel writes and reads behave
105 spin_lock(&file->f_lock);
106 offset = vfs_setpos(file, file->f_pos + offset, maxsize);
107 spin_unlock(&file->f_lock);
111 * In the generic case the entire file is data, so as long as
112 * offset isn't at the end of the file then the offset is data.
119 * There is a virtual hole at the end of the file, so as long as
120 * offset isn't i_size or larger, return i_size.
128 return vfs_setpos(file, offset, maxsize);
130 EXPORT_SYMBOL(generic_file_llseek_size);
133 * generic_file_llseek - generic llseek implementation for regular files
134 * @file: file structure to seek on
135 * @offset: file offset to seek to
136 * @whence: type of seek
138 * This is a generic implementation of ->llseek usable for all normal local
139 * filesystems. It just updates the file offset to the value specified by
140 * @offset and @whence.
/* Thin wrapper: seeks with the superblock's s_maxbytes as the limit and
 * (per the elided last argument) the inode size as the SEEK_END anchor. */
142 loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
144 struct inode *inode = file->f_mapping->host;
146 return generic_file_llseek_size(file, offset, whence,
147 inode->i_sb->s_maxbytes,
150 EXPORT_SYMBOL(generic_file_llseek);
153 * fixed_size_llseek - llseek implementation for fixed-sized devices
154 * @file: file structure to seek on
155 * @offset: file offset to seek to
156 * @whence: type of seek
157 * @size: size of the file
/* llseek for devices of fixed size: only SET/CUR/END are accepted; @size is
 * both the maximum offset and the SEEK_END anchor (default case elided). */
160 loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
163 case SEEK_SET: case SEEK_CUR: case SEEK_END:
164 return generic_file_llseek_size(file, offset, whence,
170 EXPORT_SYMBOL(fixed_size_llseek);
173 * no_seek_end_llseek - llseek implementation for fixed-sized devices
174 * @file: file structure to seek on
175 * @offset: file offset to seek to
176 * @whence: type of seek
/* Like generic llseek but SEEK_END is rejected (falls to the elided default
 * case); only SEEK_SET and SEEK_CUR are forwarded. */
179 loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
182 case SEEK_SET: case SEEK_CUR:
183 return generic_file_llseek_size(file, offset, whence,
189 EXPORT_SYMBOL(no_seek_end_llseek);
192 * no_seek_end_llseek_size - llseek implementation for fixed-sized devices
193 * @file: file structure to seek on
194 * @offset: file offset to seek to
195 * @whence: type of seek
196 * @size: maximal offset allowed
/* Variant of no_seek_end_llseek() with a caller-supplied maximum offset
 * @size; SEEK_END is still rejected (default case elided). */
199 loff_t no_seek_end_llseek_size(struct file *file, loff_t offset, int whence, loff_t size)
202 case SEEK_SET: case SEEK_CUR:
203 return generic_file_llseek_size(file, offset, whence,
209 EXPORT_SYMBOL(no_seek_end_llseek_size);
212 * noop_llseek - No Operation Performed llseek implementation
213 * @file: file structure to seek on
214 * @offset: file offset to seek to
215 * @whence: type of seek
217 * This is an implementation of ->llseek usable for the rare special case when
218 * userspace expects the seek to succeed but the (device) file is actually not
219 * able to perform the seek. In this case you use noop_llseek() instead of
220 * falling back to the default implementation of ->llseek.
/* Pretends the seek succeeded without moving f_pos (body elided — the
 * kernel-doc above describes the intent). */
222 loff_t noop_llseek(struct file *file, loff_t offset, int whence)
226 EXPORT_SYMBOL(noop_llseek);
/* Seeking unsupported — NOTE(review): body elided here; upstream returns
 * -ESPIPE. Confirm against the full source. */
228 loff_t no_llseek(struct file *file, loff_t offset, int whence)
232 EXPORT_SYMBOL(no_llseek);
/* Fallback ->llseek used when a filesystem provides none: same semantics as
 * generic_file_llseek_size() (data-only file with a virtual hole at EOF) but
 * serialized differently — upstream holds the inode lock; the locking lines
 * are elided in this excerpt. */
234 loff_t default_llseek(struct file *file, loff_t offset, int whence)
236 struct inode *inode = file_inode(file);
242 offset += i_size_read(inode);
246 retval = file->f_pos;
249 offset += file->f_pos;
253 * In the generic case the entire file is data, so as
254 * long as offset isn't at the end of the file then the
257 if (offset >= inode->i_size) {
264 * There is a virtual hole at the end of the file, so
265 * as long as offset isn't i_size or larger, return
268 if (offset >= inode->i_size) {
272 offset = inode->i_size;
/* Commit the new position only for valid offsets, and only when changed. */
276 if (offset >= 0 || unsigned_offsets(file)) {
277 if (offset != file->f_pos) {
278 file->f_pos = offset;
287 EXPORT_SYMBOL(default_llseek);
/* Dispatch a seek: requires FMODE_LSEEK, prefers the f_op->llseek method
 * (the elided else-branch presumably selects a fallback such as
 * no_llseek/default_llseek — confirm against full source). */
289 loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
291 loff_t (*fn)(struct file *, loff_t, int);
294 if (file->f_mode & FMODE_LSEEK) {
295 if (file->f_op->llseek)
296 fn = file->f_op->llseek;
298 return fn(file, offset, whence);
300 EXPORT_SYMBOL(vfs_llseek);
/* lseek(2): validates whence, seeks, and reports -EOVERFLOW when the 64-bit
 * result cannot be represented in the narrower off_t return type. */
302 SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
305 struct fd f = fdget_pos(fd);
310 if (whence <= SEEK_MAX) {
311 loff_t res = vfs_llseek(f.file, offset, whence);
313 if (res != (loff_t)retval)
314 retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
/* 32-bit compat lseek(2): compat_off_t widens losslessly, so just forward. */
321 COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
323 return sys_lseek(fd, offset, whence);
/* _llseek(2) for 32-bit ABIs: splices a 64-bit offset from two longs, seeks,
 * and copies the resulting position back through @result. */
327 #ifdef __ARCH_WANT_SYS_LLSEEK
328 SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
329 unsigned long, offset_low, loff_t __user *, result,
330 unsigned int, whence)
333 struct fd f = fdget_pos(fd);
340 if (whence > SEEK_MAX)
343 offset = vfs_llseek(f.file, ((loff_t) offset_high << 32) | offset_low,
346 retval = (int)offset;
349 if (!copy_to_user(result, &offset, sizeof(offset)))
/* Synchronous iov_iter read: requires ->read_iter, drives it through a
 * sync kiocb, and propagates the updated position back to *ppos. The
 * BUG_ON guards the sync contract — a sync kiocb must never be queued. */
358 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos)
363 if (!file->f_op->read_iter)
366 init_sync_kiocb(&kiocb, file);
367 kiocb.ki_pos = *ppos;
370 ret = call_read_iter(file, &kiocb, iter);
371 BUG_ON(ret == -EIOCBQUEUED);
373 *ppos = kiocb.ki_pos;
376 EXPORT_SYMBOL(vfs_iter_read);
/* Write-side twin of vfs_iter_read(): requires ->write_iter and runs it
 * synchronously, updating *ppos from the kiocb afterwards. */
378 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
383 if (!file->f_op->write_iter)
386 init_sync_kiocb(&kiocb, file);
387 kiocb.ki_pos = *ppos;
390 ret = call_write_iter(file, &kiocb, iter);
391 BUG_ON(ret == -EIOCBQUEUED);
393 *ppos = kiocb.ki_pos;
396 EXPORT_SYMBOL(vfs_iter_write);
/* Validate an I/O range before a read or write: rejects counts that would
 * go negative as ssize_t, negative or wrapping positions (unless the file
 * uses unsigned offsets), checks mandatory locks when enabled, and finally
 * asks the LSM for MAY_READ/MAY_WRITE permission. */
398 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
402 int retval = -EINVAL;
404 inode = file_inode(file);
405 if (unlikely((ssize_t) count < 0))
408 if (unlikely(pos < 0)) {
409 if (!unsigned_offsets(file))
411 if (count >= -pos) /* both values are in 0..LLONG_MAX */
413 } else if (unlikely((loff_t) (pos + count) < 0)) {
414 if (!unsigned_offsets(file))
418 if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
419 retval = locks_mandatory_area(inode, file, pos, pos + count - 1,
420 read_write == READ ? F_RDLCK : F_WRLCK);
424 return security_file_permission(file,
425 read_write == READ ? MAY_READ : MAY_WRITE);
/* Adapt a plain (buf, len) read to ->read_iter: wraps the user buffer in a
 * single-element iovec/iov_iter and runs it under a synchronous kiocb. */
428 static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
430 struct iovec iov = { .iov_base = buf, .iov_len = len };
432 struct iov_iter iter;
435 init_sync_kiocb(&kiocb, filp);
436 kiocb.ki_pos = *ppos;
437 iov_iter_init(&iter, READ, &iov, 1, len);
439 ret = call_read_iter(filp, &kiocb, &iter);
440 BUG_ON(ret == -EIOCBQUEUED);
441 *ppos = kiocb.ki_pos;
/* Lowest-level read dispatch: prefer the classic ->read method, fall back to
 * ->read_iter via new_sync_read() (final else, returning an error, elided). */
445 ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
448 if (file->f_op->read)
449 return file->f_op->read(file, buf, count, pos);
450 else if (file->f_op->read_iter)
451 return new_sync_read(file, buf, count, pos);
455 EXPORT_SYMBOL(__vfs_read);
/* Full read path: checks open mode (FMODE_READ/FMODE_CAN_READ), verifies the
 * user buffer is writable, runs rw_verify_area(), caps the transfer at
 * MAX_RW_COUNT, then dispatches and accounts (fsnotify + task rchar). */
457 ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
461 if (!(file->f_mode & FMODE_READ))
463 if (!(file->f_mode & FMODE_CAN_READ))
465 if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
468 ret = rw_verify_area(READ, file, pos, count);
470 if (count > MAX_RW_COUNT)
471 count = MAX_RW_COUNT;
472 ret = __vfs_read(file, buf, count, pos);
474 fsnotify_access(file);
475 add_rchar(current, ret);
483 EXPORT_SYMBOL(vfs_read);
/* Write-side twin of new_sync_read(): wraps the user buffer in a one-entry
 * iov_iter and drives ->write_iter synchronously. */
485 static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
487 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
489 struct iov_iter iter;
492 init_sync_kiocb(&kiocb, filp);
493 kiocb.ki_pos = *ppos;
494 iov_iter_init(&iter, WRITE, &iov, 1, len);
496 ret = call_write_iter(filp, &kiocb, &iter);
497 BUG_ON(ret == -EIOCBQUEUED);
499 *ppos = kiocb.ki_pos;
/* Lowest-level write dispatch: classic ->write first, else ->write_iter via
 * new_sync_write() (final error-returning else elided). */
503 ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
506 if (file->f_op->write)
507 return file->f_op->write(file, p, count, pos);
508 else if (file->f_op->write_iter)
509 return new_sync_write(file, p, count, pos);
513 EXPORT_SYMBOL(__vfs_write);
/* Write from a KERNEL buffer: force-casts the pointer to __user space (the
 * elided code presumably switches address limits — confirm against full
 * source), caps at MAX_RW_COUNT, and accounts like vfs_write(). */
515 ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
518 const char __user *p;
521 if (!(file->f_mode & FMODE_CAN_WRITE))
526 p = (__force const char __user *)buf;
527 if (count > MAX_RW_COUNT)
528 count = MAX_RW_COUNT;
529 ret = __vfs_write(file, p, count, pos);
532 fsnotify_modify(file);
533 add_wchar(current, ret);
539 EXPORT_SYMBOL(__kernel_write);
/* Full write path: mode checks, readable-user-buffer check, range/permission
 * verification, MAX_RW_COUNT cap, and write-freeze protection around the
 * actual write (file_start_write/file_end_write). */
541 ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
545 if (!(file->f_mode & FMODE_WRITE))
547 if (!(file->f_mode & FMODE_CAN_WRITE))
549 if (unlikely(!access_ok(VERIFY_READ, buf, count)))
552 ret = rw_verify_area(WRITE, file, pos, count);
554 if (count > MAX_RW_COUNT)
555 count = MAX_RW_COUNT;
556 file_start_write(file);
557 ret = __vfs_write(file, buf, count, pos);
559 fsnotify_modify(file);
560 add_wchar(current, ret);
563 file_end_write(file);
569 EXPORT_SYMBOL(vfs_write);
/* Snapshot the file's current position (one-line body elided). */
571 static inline loff_t file_pos_read(struct file *file)
/* Store an updated position back into the file (one-line body elided). */
576 static inline void file_pos_write(struct file *file, loff_t pos)
/* read(2): fdget_pos serializes f_pos access; position is read, passed by
 * reference, and written back (on success — condition elided). */
581 SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
583 struct fd f = fdget_pos(fd);
584 ssize_t ret = -EBADF;
587 loff_t pos = file_pos_read(f.file);
588 ret = vfs_read(f.file, buf, count, &pos);
590 file_pos_write(f.file, pos);
/* write(2): mirror of read(2) — positional bookkeeping around vfs_write(). */
596 SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
599 struct fd f = fdget_pos(fd);
600 ssize_t ret = -EBADF;
603 loff_t pos = file_pos_read(f.file);
604 ret = vfs_write(f.file, buf, count, &pos);
606 file_pos_write(f.file, pos);
/* pread64(2): positional read at an explicit offset; requires FMODE_PREAD
 * and never touches f_pos (negative-pos check elided). */
613 SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
614 size_t, count, loff_t, pos)
617 ssize_t ret = -EBADF;
625 if (f.file->f_mode & FMODE_PREAD)
626 ret = vfs_read(f.file, buf, count, &pos);
/* pwrite64(2): positional write; requires FMODE_PWRITE, f_pos untouched. */
633 SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
634 size_t, count, loff_t, pos)
637 ssize_t ret = -EBADF;
645 if (f.file->f_mode & FMODE_PWRITE)
646 ret = vfs_write(f.file, buf, count, &pos);
654 * Reduce an iovec's length in-place. Return the resulting number of segments
/* Trim an iovec array in place so its total length is at most @to; truncates
 * the segment that crosses the limit and returns the new segment count. */
656 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
658 unsigned long seg = 0;
661 while (seg < nr_segs) {
663 if (len + iov->iov_len >= to) {
664 iov->iov_len = to - len;
672 EXPORT_SYMBOL(iov_shorten);
/* Run a vectored I/O through ->read_iter/->write_iter: validates the RWF_*
 * flag mask, translates flags to IOCB_* bits on a sync kiocb, dispatches by
 * @type (READ/WRITE), and writes the final position back to *ppos. */
674 static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
675 loff_t *ppos, int type, int flags)
680 if (flags & ~(RWF_HIPRI | RWF_DSYNC | RWF_SYNC))
683 init_sync_kiocb(&kiocb, filp);
684 if (flags & RWF_HIPRI)
685 kiocb.ki_flags |= IOCB_HIPRI;
686 if (flags & RWF_DSYNC)
687 kiocb.ki_flags |= IOCB_DSYNC;
688 if (flags & RWF_SYNC)
689 kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
690 kiocb.ki_pos = *ppos;
693 ret = call_read_iter(filp, &kiocb, iter);
695 ret = call_write_iter(filp, &kiocb, iter);
696 BUG_ON(ret == -EIOCBQUEUED);
697 *ppos = kiocb.ki_pos;
701 /* Do it by hand, with file-ops */
/* Fallback for files with only classic ->read/->write: loop over the iovec
 * one segment at a time. Only RWF_HIPRI is tolerable here (no sync flags);
 * a short transfer (nr != iov_len) stops the loop early. */
702 static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
703 loff_t *ppos, int type, int flags)
707 if (flags & ~RWF_HIPRI)
710 while (iov_iter_count(iter)) {
711 struct iovec iovec = iov_iter_iovec(iter);
715 nr = filp->f_op->read(filp, iovec.iov_base,
716 iovec.iov_len, ppos);
718 nr = filp->f_op->write(filp, iovec.iov_base,
719 iovec.iov_len, ppos);
728 if (nr != iovec.iov_len)
730 iov_iter_advance(iter, nr);
736 /* A write operation does a read from user space and vice versa */
/* Map I/O direction to the access_ok() check on the USER buffer. */
737 #define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
740 * rw_copy_check_uvector() - Copy an array of &struct iovec from userspace
741 * into the kernel and check that it is valid.
743 * @type: One of %CHECK_IOVEC_ONLY, %READ, or %WRITE.
744 * @uvector: Pointer to the userspace array.
745 * @nr_segs: Number of elements in userspace array.
746 * @fast_segs: Number of elements in @fast_pointer.
747 * @fast_pointer: Pointer to (usually small on-stack) kernel array.
748 * @ret_pointer: (output parameter) Pointer to a variable that will point to
749 * either @fast_pointer, a newly allocated kernel array, or NULL,
750 * depending on which array was used.
752 * This function copies an array of &struct iovec of @nr_segs from
753 * userspace into the kernel and checks that each element is valid (e.g.
754 * it does not point to a kernel address or cause overflow by being too
757 * As an optimization, the caller may provide a pointer to a small
758 * on-stack array in @fast_pointer, typically %UIO_FASTIOV elements long
759 * (the size of this array, or 0 if unused, should be given in @fast_segs).
761 * @ret_pointer will always point to the array that was used, so the
762 * caller must take care not to call kfree() on it e.g. in case the
763 * @fast_pointer array was used and it was allocated on the stack.
765 * Return: The total number of bytes covered by the iovec array on success
766 * or a negative error code on error.
/* See the kernel-doc above: copies and validates a user iovec array, using
 * @fast_pointer when it is large enough and kmalloc otherwise (nr_segs is
 * already bounded by UIO_MAXIOV, so the multiplication cannot overflow).
 * The running total is clamped so it never exceeds MAX_RW_COUNT. */
768 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
769 unsigned long nr_segs, unsigned long fast_segs,
770 struct iovec *fast_pointer,
771 struct iovec **ret_pointer)
775 struct iovec *iov = fast_pointer;
778 * SuS says "The readv() function *may* fail if the iovcnt argument
779 * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
780 * traditionally returned zero for zero segments, so...
788 * First get the "struct iovec" from user memory and
789 * verify all the pointers
791 if (nr_segs > UIO_MAXIOV) {
795 if (nr_segs > fast_segs) {
796 iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
802 if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
808 * According to the Single Unix Specification we should return EINVAL
809 * if an element length is < 0 when cast to ssize_t or if the
810 * total length would overflow the ssize_t return value of the
813 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
817 for (seg = 0; seg < nr_segs; seg++) {
818 void __user *buf = iov[seg].iov_base;
819 ssize_t len = (ssize_t)iov[seg].iov_len;
821 /* see if we're about to use an invalid len or if
822 * it's about to overflow ssize_t */
828 && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
832 if (len > MAX_RW_COUNT - ret) {
833 len = MAX_RW_COUNT - ret;
834 iov[seg].iov_len = len;
/* Common engine for readv/writev after the iovec has been imported: verify
 * the range, take write-freeze protection, pick the iterator path when the
 * matching ->{read,write}_iter exists (else segment-by-segment loop), then
 * fire the appropriate fsnotify event. The (ret + (type == READ)) > 0 test
 * fires fsnotify for any non-error read (including 0) but only for writes
 * that transferred data. */
843 static ssize_t __do_readv_writev(int type, struct file *file,
844 struct iov_iter *iter, loff_t *pos, int flags)
849 tot_len = iov_iter_count(iter);
852 ret = rw_verify_area(type, file, pos, tot_len);
857 file_start_write(file);
859 if ((type == READ && file->f_op->read_iter) ||
860 (type == WRITE && file->f_op->write_iter))
861 ret = do_iter_readv_writev(file, iter, pos, type, flags);
863 ret = do_loop_readv_writev(file, iter, pos, type, flags);
866 file_end_write(file);
869 if ((ret + (type == READ)) > 0) {
871 fsnotify_access(file);
873 fsnotify_modify(file);
/* Import the user iovec (stack-fast-path via iovstack) and hand off to
 * __do_readv_writev(); import_iovec allocates when vlen > UIO_FASTIOV. */
878 static ssize_t do_readv_writev(int type, struct file *file,
879 const struct iovec __user *uvector,
880 unsigned long nr_segs, loff_t *pos,
883 struct iovec iovstack[UIO_FASTIOV];
884 struct iovec *iov = iovstack;
885 struct iov_iter iter;
888 ret = import_iovec(type, uvector, nr_segs,
889 ARRAY_SIZE(iovstack), &iov, &iter);
893 ret = __do_readv_writev(type, file, &iter, pos, flags);
/* Public vectored read entry: mode checks then do_readv_writev(READ, ...). */
899 ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
900 unsigned long vlen, loff_t *pos, int flags)
902 if (!(file->f_mode & FMODE_READ))
904 if (!(file->f_mode & FMODE_CAN_READ))
907 return do_readv_writev(READ, file, vec, vlen, pos, flags);
910 EXPORT_SYMBOL(vfs_readv);
/* Public vectored write entry: mode checks then do_readv_writev(WRITE, ...). */
912 ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
913 unsigned long vlen, loff_t *pos, int flags)
915 if (!(file->f_mode & FMODE_WRITE))
917 if (!(file->f_mode & FMODE_CAN_WRITE))
920 return do_readv_writev(WRITE, file, vec, vlen, pos, flags);
923 EXPORT_SYMBOL(vfs_writev);
/* readv-at-current-position helper shared by readv(2) and preadv2(pos==-1):
 * f_pos round-trip plus rchar accounting. */
925 static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
926 unsigned long vlen, int flags)
928 struct fd f = fdget_pos(fd);
929 ssize_t ret = -EBADF;
932 loff_t pos = file_pos_read(f.file);
933 ret = vfs_readv(f.file, vec, vlen, &pos, flags);
935 file_pos_write(f.file, pos);
940 add_rchar(current, ret);
/* writev-at-current-position helper: f_pos round-trip plus wchar accounting. */
945 static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
946 unsigned long vlen, int flags)
948 struct fd f = fdget_pos(fd);
949 ssize_t ret = -EBADF;
952 loff_t pos = file_pos_read(f.file);
953 ret = vfs_writev(f.file, vec, vlen, &pos, flags);
955 file_pos_write(f.file, pos);
960 add_wchar(current, ret);
/* Combine the high/low halves of a 64-bit position. The double shift by
 * HALF_LONG_BITS avoids an undefined full-width shift on 32-bit longs. */
965 static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
967 #define HALF_LONG_BITS (BITS_PER_LONG / 2)
968 return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
/* Positional vectored read: requires FMODE_PREAD, f_pos untouched. */
971 static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
972 unsigned long vlen, loff_t pos, int flags)
975 ssize_t ret = -EBADF;
983 if (f.file->f_mode & FMODE_PREAD)
984 ret = vfs_readv(f.file, vec, vlen, &pos, flags);
989 add_rchar(current, ret);
/* Positional vectored write: requires FMODE_PWRITE, f_pos untouched. */
994 static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
995 unsigned long vlen, loff_t pos, int flags)
998 ssize_t ret = -EBADF;
1006 if (f.file->f_mode & FMODE_PWRITE)
1007 ret = vfs_writev(f.file, vec, vlen, &pos, flags);
1012 add_wchar(current, ret);
/* readv(2): no RWF_* flags on the classic entry point. */
1017 SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
1018 unsigned long, vlen)
1020 return do_readv(fd, vec, vlen, 0);
/* writev(2): no RWF_* flags on the classic entry point. */
1023 SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
1024 unsigned long, vlen)
1026 return do_writev(fd, vec, vlen, 0);
/* preadv(2): position arrives split across two longs for 32-bit ABIs. */
1029 SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
1030 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
1032 loff_t pos = pos_from_hilo(pos_h, pos_l);
1034 return do_preadv(fd, vec, vlen, pos, 0);
/* preadv2(2): a position of -1 (check elided) means "use the file offset",
 * routing through do_readv(); otherwise a flagged positional read. */
1037 SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec,
1038 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
1041 loff_t pos = pos_from_hilo(pos_h, pos_l);
1044 return do_readv(fd, vec, vlen, flags);
1046 return do_preadv(fd, vec, vlen, pos, flags);
/* pwritev(2): split 64-bit position, no flags. */
1049 SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
1050 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
1052 loff_t pos = pos_from_hilo(pos_h, pos_l);
1054 return do_pwritev(fd, vec, vlen, pos, 0);
/* pwritev2(2): pos == -1 (check elided) falls back to the f_pos path. */
1057 SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec,
1058 unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
1061 loff_t pos = pos_from_hilo(pos_h, pos_l);
1064 return do_writev(fd, vec, vlen, flags);
1066 return do_pwritev(fd, vec, vlen, pos, flags);
1069 #ifdef CONFIG_COMPAT
/* Compat twin of do_readv_writev(): imports a 32-bit compat_iovec array into
 * native iovecs, then shares the common __do_readv_writev() engine. */
1071 static ssize_t compat_do_readv_writev(int type, struct file *file,
1072 const struct compat_iovec __user *uvector,
1073 unsigned long nr_segs, loff_t *pos,
1076 struct iovec iovstack[UIO_FASTIOV];
1077 struct iovec *iov = iovstack;
1078 struct iov_iter iter;
1081 ret = compat_import_iovec(type, uvector, nr_segs,
1082 UIO_FASTIOV, &iov, &iter);
1086 ret = __do_readv_writev(type, file, &iter, pos, flags);
/* Compat vectored read: mode checks, dispatch, rchar accounting.
 * NOTE(review): declared size_t but stores negative errnos in an ssize_t
 * local — the return type should be ssize_t (callers assign to ssize_t). */
1092 static size_t compat_readv(struct file *file,
1093 const struct compat_iovec __user *vec,
1094 unsigned long vlen, loff_t *pos, int flags)
1096 ssize_t ret = -EBADF;
1098 if (!(file->f_mode & FMODE_READ))
1102 if (!(file->f_mode & FMODE_CAN_READ))
1105 ret = compat_do_readv_writev(READ, file, vec, vlen, pos, flags);
1109 add_rchar(current, ret);
/* Compat readv at the current file position: f_pos round-trip around
 * compat_readv(). NOTE(review): same size_t-vs-ssize_t return-type issue
 * as compat_readv() above. */
1114 static size_t do_compat_readv(compat_ulong_t fd,
1115 const struct compat_iovec __user *vec,
1116 compat_ulong_t vlen, int flags)
1118 struct fd f = fdget_pos(fd);
1124 pos = f.file->f_pos;
1125 ret = compat_readv(f.file, vec, vlen, &pos, flags);
1127 f.file->f_pos = pos;
/* Compat readv(2): no flags. */
1133 COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
1134 const struct compat_iovec __user *,vec,
1135 compat_ulong_t, vlen)
1137 return do_compat_readv(fd, vec, vlen, 0);
/* Compat positional vectored read; requires FMODE_PREAD (fd lookup and
 * negative-pos check elided). */
1140 static long do_compat_preadv64(unsigned long fd,
1141 const struct compat_iovec __user *vec,
1142 unsigned long vlen, loff_t pos, int flags)
1153 if (f.file->f_mode & FMODE_PREAD)
1154 ret = compat_readv(f.file, vec, vlen, &pos, flags);
/* Compat preadv64(2): whole 64-bit position in one argument (arch opt-in). */
1159 #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
1160 COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
1161 const struct compat_iovec __user *,vec,
1162 unsigned long, vlen, loff_t, pos)
1164 return do_compat_preadv64(fd, vec, vlen, pos, 0);
/* Compat preadv(2): position split into two u32 halves. */
1168 COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
1169 const struct compat_iovec __user *,vec,
1170 compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
1172 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1174 return do_compat_preadv64(fd, vec, vlen, pos, 0);
/* Compat preadv64v2(2): 64-bit position plus RWF_* flags (arch opt-in). */
1177 #ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
1178 COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
1179 const struct compat_iovec __user *,vec,
1180 unsigned long, vlen, loff_t, pos, int, flags)
1182 return do_compat_preadv64(fd, vec, vlen, pos, flags);
/* Compat preadv2(2): pos == -1 (check elided) uses the f_pos path. */
1186 COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
1187 const struct compat_iovec __user *,vec,
1188 compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
1191 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1194 return do_compat_readv(fd, vec, vlen, flags);
1196 return do_compat_preadv64(fd, vec, vlen, pos, flags);
1199 static size_t compat_writev(struct file *file,
1200 const struct compat_iovec __user *vec,
1201 unsigned long vlen, loff_t *pos, int flags)
1203 ssize_t ret = -EBADF;
1205 if (!(file->f_mode & FMODE_WRITE))
1209 if (!(file->f_mode & FMODE_CAN_WRITE))
1212 ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0);
1216 add_wchar(current, ret);
/* Compat writev at the current file position: f_pos round-trip around
 * compat_writev(). NOTE(review): size_t return type cannot carry negative
 * errnos — should be ssize_t. */
1221 static size_t do_compat_writev(compat_ulong_t fd,
1222 const struct compat_iovec __user* vec,
1223 compat_ulong_t vlen, int flags)
1225 struct fd f = fdget_pos(fd);
1231 pos = f.file->f_pos;
1232 ret = compat_writev(f.file, vec, vlen, &pos, flags);
1234 f.file->f_pos = pos;
/* Compat writev(2): no flags. */
1239 COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
1240 const struct compat_iovec __user *, vec,
1241 compat_ulong_t, vlen)
1243 return do_compat_writev(fd, vec, vlen, 0);
/* Compat positional vectored write; requires FMODE_PWRITE (fd lookup and
 * negative-pos check elided). */
1246 static long do_compat_pwritev64(unsigned long fd,
1247 const struct compat_iovec __user *vec,
1248 unsigned long vlen, loff_t pos, int flags)
1259 if (f.file->f_mode & FMODE_PWRITE)
1260 ret = compat_writev(f.file, vec, vlen, &pos, flags);
/* Compat pwritev64(2): whole 64-bit position in one argument (arch opt-in). */
1265 #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
1266 COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
1267 const struct compat_iovec __user *,vec,
1268 unsigned long, vlen, loff_t, pos)
1270 return do_compat_pwritev64(fd, vec, vlen, pos, 0);
/* Compat pwritev(2): position split into two u32 halves. */
1274 COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
1275 const struct compat_iovec __user *,vec,
1276 compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
1278 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1280 return do_compat_pwritev64(fd, vec, vlen, pos, 0);
/* Compat pwritev64v2(2): 64-bit position plus RWF_* flags (arch opt-in). */
1283 #ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
1284 COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
1285 const struct compat_iovec __user *,vec,
1286 unsigned long, vlen, loff_t, pos, int, flags)
1288 return do_compat_pwritev64(fd, vec, vlen, pos, flags);
/* Compat pwritev2(2): pos == -1 (check elided) uses the f_pos path. */
1292 COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
1293 const struct compat_iovec __user *,vec,
1294 compat_ulong_t, vlen, u32, pos_low, u32, pos_high, int, flags)
1296 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1299 return do_compat_writev(fd, vec, vlen, flags);
1301 return do_compat_pwritev64(fd, vec, vlen, pos, flags);
/* Shared engine for sendfile(2)/sendfile64(2): validates both descriptors
 * and ranges, clamps @count, then copies via do_splice_direct(). When @ppos
 * is NULL the input file's own f_pos is used and written back afterwards.
 * SPLICE_F_NONBLOCK is derived from the input fd's O_NONBLOCK. */
1306 static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
1307 size_t count, loff_t max)
1310 struct inode *in_inode, *out_inode;
1317 * Get input file, and verify that it is ok..
1323 if (!(in.file->f_mode & FMODE_READ))
1327 pos = in.file->f_pos;
1330 if (!(in.file->f_mode & FMODE_PREAD))
1333 retval = rw_verify_area(READ, in.file, &pos, count);
1336 if (count > MAX_RW_COUNT)
1337 count = MAX_RW_COUNT;
1340 * Get output file, and verify that it is ok..
1343 out = fdget(out_fd);
1346 if (!(out.file->f_mode & FMODE_WRITE))
1349 in_inode = file_inode(in.file);
1350 out_inode = file_inode(out.file);
1351 out_pos = out.file->f_pos;
1352 retval = rw_verify_area(WRITE, out.file, &out_pos, count);
/* max == 0 means "no explicit limit": use the tighter of the two fs limits */
1357 max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
1359 if (unlikely(pos + count > max)) {
1360 retval = -EOVERFLOW;
1369 * We need to debate whether we can enable this or not. The
1370 * man page documents EAGAIN return for the output at least,
1371 * and the application is arguably buggy if it doesn't expect
1372 * EAGAIN on a non-blocking file descriptor.
1374 if (in.file->f_flags & O_NONBLOCK)
1375 fl = SPLICE_F_NONBLOCK;
1377 file_start_write(out.file);
1378 retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
1379 file_end_write(out.file);
1382 add_rchar(current, retval);
1383 add_wchar(current, retval);
1384 fsnotify_access(in.file);
1385 fsnotify_modify(out.file);
1386 out.file->f_pos = out_pos;
1390 in.file->f_pos = pos;
1396 retval = -EOVERFLOW;
/* sendfile(2) with 32-bit off_t: reads/writes the user offset and caps the
 * range at MAX_NON_LFS; NULL offset means "use and advance in-file f_pos". */
1406 SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
1413 if (unlikely(get_user(off, offset)))
1416 ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1417 if (unlikely(put_user(pos, offset)))
1422 return do_sendfile(out_fd, in_fd, NULL, count, 0);
/* sendfile64(2): full 64-bit offset, no MAX_NON_LFS clamp (max = 0). */
1425 SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
1431 if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1433 ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1434 if (unlikely(put_user(pos, offset)))
1439 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1442 #ifdef CONFIG_COMPAT
/* Compat sendfile(2): compat_off_t offset, MAX_NON_LFS clamp as native. */
1443 COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
1444 compat_off_t __user *, offset, compat_size_t, count)
1451 if (unlikely(get_user(off, offset)))
1454 ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
1455 if (unlikely(put_user(pos, offset)))
1460 return do_sendfile(out_fd, in_fd, NULL, count, 0);
/* Compat sendfile64(2): compat_loff_t offset, no LFS clamp. */
1463 COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
1464 compat_loff_t __user *, offset, compat_size_t, count)
1470 if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1472 ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
1473 if (unlikely(put_user(pos, offset)))
1478 return do_sendfile(out_fd, in_fd, NULL, count, 0);
1483 * copy_file_range() differs from regular file read and write in that it
1484 * specifically allows return partial success. When it does so is up to
1485 * the copy_file_range method.
/* Copy @len bytes between two regular files on the same superblock.
 * Strategy, in order of preference: ->clone_file_range (cheapest),
 * ->copy_file_range, and finally a do_splice_direct() fallback (the
 * splice leg is capped at MAX_RW_COUNT, so partial success is possible —
 * see the comment above this function). Accounting/fsnotify on success. */
1487 ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
1488 struct file *file_out, loff_t pos_out,
1489 size_t len, unsigned int flags)
1491 struct inode *inode_in = file_inode(file_in);
1492 struct inode *inode_out = file_inode(file_out);
1498 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
1500 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
1503 ret = rw_verify_area(READ, file_in, &pos_in, len);
1507 ret = rw_verify_area(WRITE, file_out, &pos_out, len);
1511 if (!(file_in->f_mode & FMODE_READ) ||
1512 !(file_out->f_mode & FMODE_WRITE) ||
1513 (file_out->f_flags & O_APPEND))
1516 /* this could be relaxed once a method supports cross-fs copies */
1517 if (inode_in->i_sb != inode_out->i_sb)
1523 file_start_write(file_out);
1526 * Try cloning first, this is supported by more file systems, and
1527 * more efficient if both clone and copy are supported (e.g. NFS).
1529 if (file_in->f_op->clone_file_range) {
1530 ret = file_in->f_op->clone_file_range(file_in, pos_in,
1531 file_out, pos_out, len);
1538 if (file_out->f_op->copy_file_range) {
1539 ret = file_out->f_op->copy_file_range(file_in, pos_in, file_out,
1540 pos_out, len, flags);
1541 if (ret != -EOPNOTSUPP)
1545 ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
1546 len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);
1550 fsnotify_access(file_in);
1551 add_rchar(current, ret);
1552 fsnotify_modify(file_out);
1553 add_wchar(current, ret);
1559 file_end_write(file_out);
1563 EXPORT_SYMBOL(vfs_copy_file_range);
/* copy_file_range(2): either file position may come from a user pointer
 * (copied in/out around the call) or, when the pointer is NULL, from the
 * file's own f_pos, which is then advanced. */
1565 SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
1566 int, fd_out, loff_t __user *, off_out,
1567 size_t, len, unsigned int, flags)
1573 ssize_t ret = -EBADF;
1575 f_in = fdget(fd_in);
1579 f_out = fdget(fd_out);
1585 if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
1588 pos_in = f_in.file->f_pos;
1592 if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
1595 pos_out = f_out.file->f_pos;
1598 ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len,
1605 if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
1608 f_in.file->f_pos = pos_in;
1612 if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
1615 f_out.file->f_pos = pos_out;
/* rw_verify_area() analogue for clone/dedupe ranges: rejects negative or
 * wrapping positions, honors mandatory locks (len == 0 means "to EOF", so
 * the lock range extends to OFFSET_MAX), then asks the LSM. */
1627 static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
1629 struct inode *inode = file_inode(file);
1631 if (unlikely(pos < 0))
1634 if (unlikely((loff_t) (pos + len) < 0))
1637 if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
1638 loff_t end = len ? pos + len - 1 : OFFSET_MAX;
1641 retval = locks_mandatory_area(inode, file, pos, end,
1642 write ? F_WRLCK : F_RDLCK);
1647 return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
1651 * Check that the two inodes are eligible for cloning, the ranges make
1652 * sense, and then flush all dirty data. Caller must ensure that the
1653 * inodes have been locked against any other modifications.
1655 * Returns: 0 for "nothing to clone", 1 for "something to clone", or
1656 * the usual negative error code.
/* See kernel-doc above: eligibility + range sanity checks for reflink and
 * dedupe, followed by flushing dirty pages in both ranges. A zero *len is
 * rewritten to "to EOF" for reflink; dedupe additionally requires byte
 * equality of the ranges (vfs_dedupe_file_range_compare at the end). */
1658 int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
1659 struct inode *inode_out, loff_t pos_out,
1660 u64 *len, bool is_dedupe)
1662 loff_t bs = inode_out->i_sb->s_blocksize;
1665 bool same_inode = (inode_in == inode_out);
1668 /* Don't touch certain kinds of inodes */
1669 if (IS_IMMUTABLE(inode_out))
1672 if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
1675 /* Don't reflink dirs, pipes, sockets... */
1676 if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
1678 if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
1681 /* Are we going all the way to the end? */
1682 isize = i_size_read(inode_in);
1686 /* Zero length dedupe exits immediately; reflink goes to EOF. */
1688 if (is_dedupe || pos_in == isize)
1692 *len = isize - pos_in;
1695 /* Ensure offsets don't wrap and the input is inside i_size */
1696 if (pos_in + *len < pos_in || pos_out + *len < pos_out ||
1697 pos_in + *len > isize)
1700 /* Don't allow dedupe past EOF in the dest file */
1704 disize = i_size_read(inode_out);
1705 if (pos_out >= disize || pos_out + *len > disize)
1709 /* If we're linking to EOF, continue to the block boundary. */
1710 if (pos_in + *len == isize)
1711 blen = ALIGN(isize, bs) - pos_in;
1715 /* Only reflink if we're aligned to block boundaries */
1716 if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
1717 !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
1720 /* Don't allow overlapped reflink within the same file */
1722 if (pos_out + blen > pos_in && pos_out < pos_in + blen)
1726 /* Wait for the completion of any pending IOs on both files */
1727 inode_dio_wait(inode_in);
1729 inode_dio_wait(inode_out);
1731 ret = filemap_write_and_wait_range(inode_in->i_mapping,
1732 pos_in, pos_in + *len - 1);
1736 ret = filemap_write_and_wait_range(inode_out->i_mapping,
1737 pos_out, pos_out + *len - 1);
1742 * Check that the extents are the same.
1745 bool is_same = false;
1747 ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
1748 inode_out, pos_out, *len, &is_same);
1757 EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
/*
 * Clone (reflink) @len bytes from @file_in at @pos_in to @file_out at
 * @pos_out by calling the filesystem's ->clone_file_range() method.
 * Returns 0 on success or a negative errno.
 * NOTE(review): excerpt — the early-return error values for the checks
 * below are elided here.
 */
1759 int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1760 			 struct file *file_out, loff_t pos_out, u64 len)
1762 	struct inode *inode_in = file_inode(file_in);
1763 	struct inode *inode_out = file_inode(file_out);
/* Cloning only makes sense for regular files, never for directories. */
1766 	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
1768 	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
1772 	 * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
1773 	 * the same mount. Practically, they only need to be on the same file
/* Cross-superblock reflink is not supported. */
1776 	if (inode_in->i_sb != inode_out->i_sb)
/* Need a readable source, a writable destination, and no O_APPEND dest. */
1779 	if (!(file_in->f_mode & FMODE_READ) ||
1780 	    !(file_out->f_mode & FMODE_WRITE) ||
1781 	    (file_out->f_flags & O_APPEND))
/* The filesystem must actually implement cloning. */
1784 	if (!file_in->f_op->clone_file_range)
/* Per-side range validation (offsets, locks, LSM) — see clone_verify_area(). */
1787 	ret = clone_verify_area(file_in, pos_in, len, false);
1791 	ret = clone_verify_area(file_out, pos_out, len, true);
/* Source range must lie entirely within the source file. */
1795 	if (pos_in + len > i_size_read(inode_in))
1798 	ret = file_in->f_op->clone_file_range(file_in, pos_in,
1799 			file_out, pos_out, len);
/* Fire read/modify fsnotify events, mirroring a normal read+write. */
1801 		fsnotify_access(file_in);
1802 		fsnotify_modify(file_out);
1807 EXPORT_SYMBOL(vfs_clone_file_range);
1810  * Read a page's worth of file data into the page cache. Return the page
/*
 * Helper for the dedupe comparison loop: returns the locked/uptodate page
 * covering @offset, or an ERR_PTR on failure.
 * NOTE(review): excerpt — the put_page() on the !PageUptodate error path
 * is presumably elided here; confirm against the full source.
 */
1813 static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1815 	struct address_space *mapping;
/* Page index containing @offset. */
1819 	n = offset >> PAGE_SHIFT;
1820 	mapping = inode->i_mapping;
1821 	page = read_mapping_page(mapping, n, NULL);
/* A read error leaves the page !Uptodate: report it as -EIO. */
1824 		if (!PageUptodate(page)) {
1826 			return ERR_PTR(-EIO);
1833  * Compare extents of two files to see if they are the same.
1834  * Caller must have locked both inodes to prevent write races.
/*
 * Walks the two ranges a page at a time, memcmp()ing the overlapping
 * portions; sets *is_same accordingly. Returns 0 or a negative errno.
 * NOTE(review): excerpt — the loop header, "same page" handling and the
 * src-side put_page() are elided here.
 */
1836 int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1837 				  struct inode *dest, loff_t destoff,
1838 				  loff_t len, bool *is_same)
1844 	struct page *src_page;
1845 	struct page *dest_page;
/*
 * Per-iteration chunk: the smaller of the bytes remaining in either
 * page (from the intra-page offsets) and the bytes left to compare.
 */
1853 		src_poff = srcoff & (PAGE_SIZE - 1);
1854 		dest_poff = destoff & (PAGE_SIZE - 1);
1855 		cmp_len = min(PAGE_SIZE - src_poff,
1856 			      PAGE_SIZE - dest_poff);
1857 		cmp_len = min(cmp_len, len);
1861 		src_page = vfs_dedupe_get_page(src, srcoff);
1862 		if (IS_ERR(src_page)) {
1863 			error = PTR_ERR(src_page);
/* On dest failure, drop the already-locked src page before bailing. */
1866 		dest_page = vfs_dedupe_get_page(dest, destoff);
1867 		if (IS_ERR(dest_page)) {
1868 			error = PTR_ERR(dest_page);
1869 			unlock_page(src_page);
/* Map both pages; flush dcache so memcmp sees current data. */
1873 		src_addr = kmap_atomic(src_page);
1874 		dest_addr = kmap_atomic(dest_page);
1876 		flush_dcache_page(src_page);
1877 		flush_dcache_page(dest_page);
1879 		if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
/* Unmap in reverse order of kmap_atomic(), then release both pages. */
1882 		kunmap_atomic(dest_addr);
1883 		kunmap_atomic(src_addr);
1884 		unlock_page(dest_page);
1885 		unlock_page(src_page);
1886 		put_page(dest_page);
1903 EXPORT_SYMBOL(vfs_dedupe_file_range_compare);
/*
 * FIDEDUPERANGE: deduplicate one source range of @file against each
 * destination described in @same->info[]. Per-destination results go in
 * info[i].status / info[i].bytes_deduped; the function's return value
 * reports setup errors only.
 * NOTE(review): excerpt — error labels, fdput(), and the trailing
 * return are elided here.
 */
1905 int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
1907 	struct file_dedupe_range_info *info;
1908 	struct inode *src = file_inode(file);
/* CAP_SYS_ADMIN may dedupe into files it cannot open for write. */
1913 	bool is_admin = capable(CAP_SYS_ADMIN);
1914 	u16 count = same->dest_count;
1915 	struct file *dst_file;
/* Source must be readable. */
1919 	if (!(file->f_mode & FMODE_READ))
/* Reserved fields must be zero for forward compatibility. */
1922 	if (same->reserved1 || same->reserved2)
1925 	off = same->src_offset;
1926 	len = same->src_length;
/* Source must be a regular file. */
1929 	if (S_ISDIR(src->i_mode))
1933 	if (!S_ISREG(src->i_mode))
/* Validate the source range (offsets, locks, LSM). */
1936 	ret = clone_verify_area(file, off, len, false);
/* Source range must not extend past EOF. */
1941 	if (off + len > i_size_read(src))
1944 	/* pre-format output fields to sane values */
1945 	for (i = 0; i < count; i++) {
1946 		same->info[i].bytes_deduped = 0ULL;
1947 		same->info[i].status = FILE_DEDUPE_RANGE_SAME;
/* Process each destination; failures are per-entry, not fatal. */
1950 	for (i = 0, info = same->info; i < count; i++, info++) {
1952 		struct fd dst_fd = fdget(info->dest_fd);
1954 		dst_file = dst_fd.file;
/* Bad destination fd: record it and move on to the next entry. */
1956 			info->status = -EBADF;
1959 		dst = file_inode(dst_file);
/* Take write access on the destination's mount for the duration. */
1961 		ret = mnt_want_write_file(dst_file);
1967 		dst_off = info->dest_offset;
1968 		ret = clone_verify_area(dst_file, dst_off, len, true);
/* Per-destination eligibility checks, recorded in info->status. */
1975 		if (info->reserved) {
1976 			info->status = -EINVAL;
1977 		} else if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) {
1978 			info->status = -EINVAL;
1979 		} else if (file->f_path.mnt != dst_file->f_path.mnt) {
1980 			info->status = -EXDEV;
1981 		} else if (S_ISDIR(dst->i_mode)) {
1982 			info->status = -EISDIR;
1983 		} else if (dst_file->f_op->dedupe_file_range == NULL) {
1984 			info->status = -EINVAL;
/* All checks passed: ask the filesystem to dedupe the range. */
1986 			deduped = dst_file->f_op->dedupe_file_range(file, off,
/* -EBADE from the fs means "contents differ", not a hard error. */
1989 			if (deduped == -EBADE)
1990 				info->status = FILE_DEDUPE_RANGE_DIFFERS;
1991 			else if (deduped < 0)
1992 				info->status = deduped;
1994 				info->bytes_deduped += deduped;
1998 		mnt_drop_write_file(dst_file);
/* Allow a fatal signal to abort the remaining destinations. */
2002 		if (fatal_signal_pending(current))
2009 EXPORT_SYMBOL(vfs_dedupe_file_range);