/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush and thus no need for explicit cache
 * flush operations, and there are no non-transactional metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}
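
/*
 * Illustrative userspace sketch, not part of the kernel source: persisting
 * a newly created file requires fsyncing both the file and its parent
 * directory; the latter lands in xfs_dir_fsync() above. Paths are
 * hypothetical.
 *
 *	int fd = open("/mnt/dir/file", O_CREAT | O_WRONLY, 0644);
 *	int dfd = open("/mnt/dir", O_RDONLY | O_DIRECTORY);
 *
 *	write(fd, "data", 4);
 *	fsync(fd);	// file data and inode, via ->fsync on the file
 *	fsync(dfd);	// directory entry, via xfs_dir_fsync()
 */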

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first. This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
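
/*
 * Illustrative sketch, not kernel code: given the datasync handling above,
 * an overwrite of already-allocated blocks followed by fdatasync() may skip
 * the log force entirely and only needs the trailing cache flush:
 *
 *	pwrite(fd, buf, len, 0);	// overwrite, no metadata change
 *	fdatasync(fd);			// timestamps alone don't force the log
 */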

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * held exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write. If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive, which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above. Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root. This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}
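
/*
 * Illustrative sketch, not kernel code: the EOF zeroing above guarantees
 * that a write starting beyond the current EOF never exposes stale block
 * contents in the gap; the hole reads back as zeroes.
 *
 *	ftruncate(fd, 4096);			// i_size now 4096
 *	pwrite(fd, buf, 4096, 1048576);		// extend far past EOF
 *	pread(fd, probe, 4096, 8192);		// gap is all zeroes
 */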

STATIC int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED,
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size. We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io) {
		/* If we are going to wait for other DIO to finish, bail */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (atomic_read(&inode->i_dio_count)) {
				/* don't leak the iolock on the bail-out */
				ret = -EAGAIN;
				goto out;
			}
		} else {
			inode_dio_wait(inode);
		}
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
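
/*
 * Illustrative userspace sketch, not kernel code: a direct IO write that
 * satisfies the sector-size check above and is filesystem-block aligned, so
 * it runs under IOLOCK_SHARED:
 *
 *	void *buf;
 *	int fd = open("/mnt/file", O_WRONLY | O_DIRECT);
 *
 *	posix_memalign(&buf, 4096, 4096);	// aligned buffer for O_DIRECT
 *	memset(buf, 0, 4096);
 *	pwrite(fd, buf, 4096, 0);		// block-aligned offset and length
 */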

static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}
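
/*
 * Illustrative sketch, not kernel code: since buffered writes bail out of
 * IOCB_NOWAIT above, userspace issuing RWF_NOWAIT must handle the fallback:
 *
 *	ssize_t n = pwritev2(fd, &iov, 1, off, RWF_NOWAIT);
 *
 *	if (n < 0 && (errno == EOPNOTSUPP || errno == EAGAIN))
 *		n = pwritev(fd, &iov, 1, off);	// plain blocking write
 */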

STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW. In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
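
/*
 * Illustrative sketch, not kernel code: generic_write_sync() above honours
 * both O_DSYNC/O_SYNC on the open file and per-IO sync flags, e.g.:
 *
 *	pwritev2(fd, &iov, 1, off, RWF_DSYNC);	// sync semantics for this IO only
 */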

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need for a collapse range to overlap EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been updated so
	 * that if we crash during the operation we don't leave shifted
	 * extents past EOF and hence lose access to the data that is
	 * contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
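
/*
 * Illustrative userspace sketch, not kernel code: two of the fallocate(2)
 * modes accepted by XFS_FALLOC_FL_SUPPORTED above. Hole punching must be
 * combined with KEEP_SIZE:
 *
 *	fallocate(fd, 0, 0, 1048576);		// preallocate 1 MiB
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 65536);			// punch a 64 KiB hole at 4 KiB
 */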

STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				       len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	int		error;

	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
					len, true);
	if (error)
		return error;
	return len;
}
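
/*
 * Illustrative userspace sketch, not kernel code: these two methods back the
 * FICLONE/FICLONERANGE and FIDEDUPERANGE ioctls, e.g. a whole-file reflink
 * copy:
 *
 *	#include <linux/fs.h>		// FICLONE
 *
 *	int src = open("/mnt/a", O_RDONLY);
 *	int dst = open("/mnt/b", O_WRONLY | O_CREAT, 0644);
 *
 *	ioctl(dst, FICLONE, src);	// share extents instead of copying data
 */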

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem. With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size. For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}
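
/*
 * Illustrative userspace sketch, not kernel code: the getdents64() buffers
 * sized by the estimate above are what glibc's readdir() iterates over:
 *
 *	DIR *dp = opendir("/mnt/dir");
 *	struct dirent *de;
 *
 *	while ((de = readdir(dp)) != NULL)
 *		puts(de->d_name);
 *	closedir(dp);
 */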

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
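
/*
 * Illustrative userspace sketch, not kernel code: walking the data extents
 * of a sparse file using the SEEK_DATA/SEEK_HOLE support wired up above:
 *
 *	off_t data = 0, hole;
 *
 *	while ((data = lseek(fd, data, SEEK_DATA)) != -1) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		printf("data %jd..%jd\n", (intmax_t)data, (intmax_t)hole);
 *		data = hole;
 *	}
 */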

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static int
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static int
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
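
/*
 * Illustrative userspace sketch, not kernel code: the VM_SYNC check above is
 * what makes MAP_SYNC fail on non-DAX files, so callers probe with
 * MAP_SHARED_VALIDATE and fall back to a plain shared mapping:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 *	if (p == MAP_FAILED && errno == EOPNOTSUPP)
 *		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */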

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};