/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

/*
 * We ignore the datasync flag here because a datasync is effectively
 * identical to an fsync. That is, datasync implies that we need to write
 * only the metadata needed to be able to access the data that is written
 * if we crash after the call completes. Hence if we are writing beyond
 * EOF we have to log the inode size change as well, which makes it a
 * full fsync. If we don't write beyond EOF, the inode core will be
 * clean in memory and so we don't need to log the inode, just like
 * fsync.
 */
STATIC int
xfs_file_fsync(
	struct file		*file,
	struct dentry		*dentry,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;

	xfs_itrace_entry(ip);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	if (ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		if (xfs_ipincount(ip)) {
			if (ip->i_itemp->ili_last_lsn) {
				error = _xfs_log_force_lsn(ip->i_mount,
						ip->i_itemp->ili_last_lsn,
						XFS_LOG_SYNC, &log_flushed);
			} else {
				error = _xfs_log_force(ip->i_mount,
						XFS_LOG_SYNC, &log_flushed);
			}
		}
	}

	if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If the log write didn't issue an ordered tag we need
		 * to flush the disk cache for the data device now.
		 */
		if (!log_flushed)
			xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp);

		/*
		 * If this inode is on the RT dev we need to flush that
		 * cache as well.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
	}

	return -error;
}

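/*
 * Read from the file with the iolock held shared so that concurrent
 * writers are excluded.  Direct I/O additionally takes i_mutex while
 * cached pages are flushed and invalidated before the read is issued.
 */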
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
		int iolock = XFS_IOLOCK_SHARED;

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
					dmflags, &iolock);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
		}
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

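/*
 * splice reads take the same shared iolock as xfs_file_aio_read() and
 * hand the request to generic_file_splice_read().
 */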
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_SHARED;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
					FILP_DELAY_FLAG(infilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

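/*
 * splice writes take the iolock exclusive.  i_new_size publishes the
 * size the file may grow to while the write is in progress; if the
 * write falls short, the on-disk size is trimmed back to the in-core
 * size before the locks are dropped.
 */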
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fsize_t		isize, new_size;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_EXCL;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
					FILP_DELAY_FLAG(outfilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

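/*
 * Write path.  Direct I/O starts with the iolock shared (upgrading to
 * exclusive when the page cache has to be invalidated) and falls back
 * to buffered I/O if it cannot complete the whole request.  Buffered
 * I/O holds i_mutex and the iolock exclusive and retries once on
 * ENOSPC after flushing dirty pages.
 */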
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0, error = 0;
	int			ioflags = 0;
	xfs_fsize_t		isize, new_size;
	int			iolock;
	int			eventsent = 0;
	size_t			ocount = 0, count;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (error)
		return error;

	count = ocount;
	if (count == 0)
		return 0;

	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

relock:
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip,
				      pos, count, dmflags, &iolock);
		if (error) {
			goto out_unlock_internal;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && pos != ip->i_size)
			goto start;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
			return XFS_ERROR(-EINVAL);
		}

		if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
			iolock = XFS_IOLOCK_EXCL;
			need_i_mutex = 1;
			mutex_lock(&inode->i_mutex);
			xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
			goto start;
		}
	}

	new_size = pos + count;
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;

	if (likely(!(ioflags & IO_INVIS)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */

	if (pos > ip->i_size) {
		error = xfs_zero_eof(ip, pos, ip->i_size);
		if (error) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
	error = -file_remove_suid(file);
	if (unlikely(error))
		goto out_unlock_internal;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (mapping->nrpages) {
			WARN_ON(need_i_mutex == 0);
			error = xfs_flushinval_pages(ip,
					(pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (error)
				goto out_unlock_internal;
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			need_i_mutex = 0;
		}

		trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&nr_segs, pos, &iocb->ki_pos, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(ip, iolock);
			goto relock;
		}
	} else {
		int enospc = 0;
		ssize_t ret2 = 0;

write_retry:
		trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags);
		ret2 = generic_file_buffered_write(iocb, iovp, nr_segs,
				pos, &iocb->ki_pos, count, ret);
		/*
		 * if we just got an ENOSPC, flush the inode now we
		 * aren't holding any page locks and retry *once*
		 */
		if (ret2 == -ENOSPC && !enospc) {
			error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
			if (error)
				goto out_unlock_internal;
			enospc = 1;
			goto write_retry;
		}
		ret = ret2;
	}

	current->backing_dev_info = NULL;

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && iocb->ki_pos > isize))
		iocb->ki_pos = isize;

	if (iocb->ki_pos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (iocb->ki_pos > ip->i_size)
			ip->i_size = iocb->ki_pos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (ret == -ENOSPC &&
	    DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
		xfs_iunlock(ip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(ip->i_mount, DM_EVENT_NOSPACE, ip,
				DM_RIGHT_NULL, ip, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(ip, iolock);
		if (error)
			goto out_unlock_internal;
		goto start;
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error2;

		xfs_iunlock(ip, iolock);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);

		error2 = filemap_write_and_wait_range(mapping, pos, end);
		if (!error)
			error = error2;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_ilock(ip, iolock);

		error2 = -xfs_file_fsync(file, file->f_path.dentry,
					(file->f_flags & __O_SYNC) ? 0 : 1);
		if (!error)
			error = error2;
	}

out_unlock_internal:
	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		/*
		 * If this was a direct or synchronous I/O that failed (such
		 * as ENOSPC) then part of the I/O may have been written to
		 * disk before the error occurred.  In this case the on-disk
		 * file size may have been adjusted beyond the in-memory file
		 * size and now needs to be truncated back.
		 */
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, iolock);
out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
	return -error;
}

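/*
 * Refuse to open files larger than MAX_NON_LFS without O_LARGEFILE,
 * and anything on a shut-down filesystem.
 */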
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we
	 * read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

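/*
 * Hook up our vm_operations so that page faults and mkwrite requests on
 * mapped files go through the XFS routines below.
 */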
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken write protection fault and is being made
 * writable. We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
	.open_exec	= xfs_file_open_exec,
#endif
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};