fs/xfs/xfs_file.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		inode_lock(VFS_I(ip));
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

/*
 * xfs_iozero clears the specified range supplied via the page cache (except in
 * the DAX case). Writes through the page cache will allocate blocks over holes,
 * though the callers usually map the holes first and avoid them. If a block is
 * not completely zeroed, then it will be read from disk before being partially
 * zeroed.
 *
 * In the DAX case, we can just directly write to the underlying pages. This
 * will not allocate blocks, but will avoid holes and unwritten extents and so
 * not do unnecessary work.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status = 0;


	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		if (IS_DAX(VFS_I(ip))) {
			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
						     xfs_get_blocks_direct);
			if (status)
				break;
		} else {
			status = pagecache_write_begin(NULL, mapping, pos, bytes,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
			if (status)
				break;

			zero_user(page, offset, bytes);

			status = pagecache_write_end(NULL, mapping, pos, bytes,
						bytes, page, fsdata);
			WARN_ON(status <= 0); /* can't return less than zero! */
			status = 0;
		}
		pos += bytes;
		count -= bytes;
	} while (count);

	return status;
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		ip->i_d.di_mode &= ~S_ISUID;
		if (ip->i_d.di_mode & S_IXGRP)
			ip->i_d.di_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

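/*
 * Regular file fsync: write back dirty file data, then force the log to the
 * last LSN that modified the inode and, if needed, flush the data device
 * write cache.
 */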
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

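/*
 * Read side of the file operations: direct IO reads check device sector
 * alignment and flush/invalidate the page cache under the exclusive iolock
 * before falling through to generic_file_read_iter() with the iolock held
 * shared.
 */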
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = iov_iter_count(to);
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	loff_t			pos = iocb->ki_pos;

	XFS_STATS_INC(mp, xs_read_calls);

	if (unlikely(iocb->ki_flags & IOCB_DIRECT))
		ioflags |= XFS_IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -EINVAL;
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take lock exclusively
	 * if the page cache needs invalidation. This allows the normal direct
	 * IO case of no page cache pages to proceed concurrently without
	 * serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (inode->i_mapping->nrpages) {
			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(ip->i_mount, xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	/*
	 * DAX inodes cannot use the page cache for splice, so we have to push
	 * them through the VFS IO path. This means it goes through
	 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
	 * cannot lock the splice operation at this level for DAX inodes.
	 */
	if (IS_DAX(VFS_I(ip))) {
		ret = default_file_splice_read(infilp, ppos, pipe, count,
					       flags);
		goto out;
	}

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
out:
	if (ret > 0)
		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize,
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	*did_zeroing = true;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
				       &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		*did_zeroing = true;
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, true);
	if (error)
		return error;

	/* For changing security info in file_remove_privs() we need i_mutex */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_rw_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_rw_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			pos = iocb->ki_pos;
	loff_t			end;
	struct iov_iter		data;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask))
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need to
	 * consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);
	pos = iocb->ki_pos;
	end = pos + count - 1;

	/*
	 * See xfs_file_read_iter() for why we do a full-file flush here.
	 */
	if (mapping->nrpages) {
		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if we fail
		 * to invalidate a page, but this should never happen on XFS.
		 * Warn if it does fail.
		 */
		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);

	data = *from;
	ret = mapping->a_ops->direct_IO(iocb, &data, pos);

	/* see generic_file_direct_write() for why this is necessary */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT,
					      end >> PAGE_CACHE_SHIFT);
	}

	if (ret > 0) {
		pos += ret;
		iov_iter_advance(from, ret);
		iocb->ki_pos = pos;
	}
out:
	xfs_rw_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS. DAX can result in
	 * partial writes, but direct IO will either complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
	return ret;
}

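/*
 * Buffered writes: always take the iolock exclusive, run the common pre-write
 * checks and then call generic_perform_write(), retrying once after freeing
 * speculative preallocations if the write hits EDQUOT or ENOSPC.
 */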
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, iov_iter_count(from),
				      iocb->ki_pos, 0);
	ret = generic_perform_write(file, from, iocb->ki_pos);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

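/*
 * Top-level ->write_iter handler: dispatch to the direct IO path for
 * O_DIRECT/DAX writes and the buffered path otherwise, then issue any
 * required sync for O_SYNC/O_DSYNC style writes.
 */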
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		ssize_t		err;

		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, false);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = 1;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}


STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into given range.  If we are searching holes
		 * and if this is the first time we got into the loop, it means
		 * that the given offset landed in a hole, return it.
		 *
		 * If we have already stepped through some block buffers to find
		 * holes but they all contained data, then the last offset is
		 * already updated and points to the end of the last mapped
		 * page; if it does not reach the endpoint of the search, there
		 * should be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of range.
			 * If the current offset does not reach the end of
			 * the specified search range, there should be a hole
			 * between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated(page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * point to search if this is the first time to
				 * come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data but found nothing,
			 * or searching for a hole but found a data buffer.  In
			 * either case the next page probably contains what we
			 * are looking for, so update the last offset to it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the search
		 * is done.  In this case, nothing was found for searching
		 * data, but we found a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = -ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

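/*
 * ->llseek: SEEK_SET/SEEK_CUR/SEEK_END use the generic helper, while
 * SEEK_HOLE/SEEK_DATA are handled by the extent-aware search above.
 */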
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * mmap()d file has taken write protection fault and is being made writable. We
 * can set the page state up correctly for a writable page, which means we can
 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
 * mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
	} else {
		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}

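/*
 * Read fault handler: takes the mmap lock shared and dispatches to the DAX
 * or generic page-cache fault paths; write faults on DAX files are punted
 * to xfs_filemap_page_mkwrite() above.
 */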
STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		/*
		 * we do not want to trigger unwritten extent conversion on read
		 * faults - that is unnecessary overhead and would also require
		 * changes to xfs_get_blocks_direct() to map unwritten extent
		 * ioend for conversion on read-only mappings.
		 */
		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
	} else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. Hence we need to handle both cases. There is no
 * ->pmd_mkwrite callout for huge pages, so we have a single function here to
 * handle both. @flags carries the information on the type of fault occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
			      NULL);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it needs to serialise against
 * truncate similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{

	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;

}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

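/*
 * Set up the XFS vm_ops for a mapping; DAX-capable inodes also get
 * VM_MIXEDMAP and VM_HUGEPAGE so the DAX fault handlers can be used.
 */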
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};