/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}
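
/*
 * A typical calling pattern for these helpers (an illustrative sketch;
 * the real call sites are xfs_file_aio_read() and the write paths below)
 * takes the IO lock shared for the common case, upgrades to exclusive
 * only when the page cache must be invalidated, and demotes back once
 * that is done:
 *
 *	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 *	if (need_invalidate) {			(a hypothetical check)
 *		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 *		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 *		... invalidate the page cache ...
 *		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	}
 */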

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the supplied buffer,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}
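
/*
 * For example (illustrative numbers, assuming PAGE_CACHE_SIZE == 4096):
 * a call with pos == 3000 and count == 6000 makes three passes through
 * the loop in xfs_iozero() above:
 *
 *	pass 1: offset = 3000, bytes = 1096	(tail of page 0)
 *	pass 2: offset =    0, bytes = 4096	(all of page 1)
 *	pass 3: offset =    0, bytes =  808	(head of page 2)
 */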

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}
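
/*
 * Cache flush summary for XFS_MOUNT_BARRIER mounts, as implemented in
 * xfs_file_fsync() above:
 *
 *	data on RT subvolume		flush rtdev before the log force
 *	external log (logdev != ddev)	flush ddev before the log force
 *	single device			rely on the log force; flush ddev
 *					afterwards only if nothing was logged
 */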

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return -XFS_ERROR(EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == i_size_read(inode))
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here.  If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared.  We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take the lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
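
/*
 * Lock state transitions in xfs_file_aio_read() for direct IO with
 * cached pages present (a sketch of the code above):
 *
 *	shared -> unlocked -> exclusive		to flush and invalidate
 *	exclusive -> shared (demote)		the read then proceeds
 *
 * The brief unlocked window is why nrpages is rechecked after the lock
 * is retaken exclusively.
 */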

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	if (error)
		return error;
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
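
/*
 * For example (illustrative numbers, assuming 4096 byte blocks):
 * extending a file from isize == 10000 to offset == 20000 gives
 * zero_offset == 1808 (10000 mod 4096), so xfs_zero_last_block() zeroes
 * the remaining 2288 bytes of the block holding the old EOF.  Whole
 * blocks beyond that are left to xfs_zero_eof() below.
 */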

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.
 */
int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop.  It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			goto out_lock;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
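
/*
 * Continuing the example above (4096 byte blocks, isize == 10000,
 * offset == 20000): last_fsb = 2 (the block holding byte 9999),
 * start_zero_fsb = 3 and end_zero_fsb = 4 (the block holding byte
 * 19999), so blocks 3 and 4 are candidates for zeroing if they are
 * mapped to allocated, written extents.
 */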

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

	xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which involves
	 * dropping all locks and relocking to maintain correct locking order.
	 * If we do this, restart the function to ensure all checks and values
	 * are still valid.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
	}
	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}
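
/*
 * Note the lock dance in xfs_file_aio_write_checks(): upgrading from
 * IOLOCK_SHARED to IOLOCK_EXCL requires dropping the ilock as well,
 * because the iolock must always be taken before the ilock.  The goto
 * restart then repeats generic_write_checks(), as i_size may have
 * changed while no locks were held.
 */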

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	int			iolock;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed.  We don't need
	 * to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need to be invalidated
	 * after we got the iolock, to protect against other threads adding
	 * new pages while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	if (mapping->nrpages) {
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
							FI_REMAPF_LOCKED);
		if (ret)
			goto out;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
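
/*
 * Direct IO write locking summary, as implemented above:
 *
 *	aligned, no cached pages	IOLOCK_SHARED throughout
 *	cached pages			IOLOCK_EXCL to flush, then demote
 *	unaligned			IOLOCK_EXCL plus inode_dio_wait()
 */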

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	size_t			count = ocount;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * If we just got an ENOSPC, flush the inode now that we aren't
	 * holding any page locks, and retry *once*.
	 */
	if (ret == -ENOSPC && !enospc) {
		enospc = 1;
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (!ret)
			goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}
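
/*
 * The single ENOSPC retry above is likely to help because writing back
 * the dirty pages completes delayed allocations, which can release
 * reserved blocks and may free enough space for the retry to succeed.
 */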

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  ocount);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}

	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
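
/*
 * From userspace (an illustrative mapping, not part of this file's
 * interface): fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, len) reaches
 * xfs_file_fallocate() as XFS_IOC_RESVSP and preallocates blocks
 * without moving i_size, while FALLOC_FL_PUNCH_HOLE becomes
 * XFS_IOC_UNRESVSP and frees the blocks backing the range.
 */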

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};
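
/*
 * ->read and ->write point at the generic synchronous wrappers, which
 * build a kiocb and call ->aio_read/->aio_write, so all file IO is
 * funnelled through the xfs_file_aio_* paths above.
 */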

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};