/*
 *  linux/fs/nfs/file.c
 *
 *  Copyright (C) 1992  Rick Sladkey
 *
 *  Changes Copyright (C) 1994 by Florian La Roche
 *   - Do not copy data too often around in the kernel.
 *   - In nfs_file_read the return value of kmalloc wasn't checked.
 *   - Put in a better version of read look-ahead buffering. Original idea
 *     and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 *  Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 *  Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 *  nfs regular file handling functions
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/swap.h>

#include <linux/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_FILE

static const struct vm_operations_struct nfs_file_vm_ops;

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode)	(0)
#endif

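/*
 * Sanity-check the open flags: O_APPEND and O_DIRECT cannot be used
 * together on NFS, so refuse that combination up front.
 */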
int nfs_check_flags(int flags)
{
        if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(nfs_check_flags);

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
        int res;

        dprintk("NFS: open file(%pD2)\n", filp);

        nfs_inc_stats(inode, NFSIOS_VFSOPEN);
        res = nfs_check_flags(filp->f_flags);
        if (res)
                return res;

        res = nfs_open(inode, filp);
        return res;
}

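/*
 * Release the file: drop the NFS open context attached to it.
 */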
int
nfs_file_release(struct inode *inode, struct file *filp)
{
        dprintk("NFS: release(%pD2)\n", filp);

        nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
        nfs_file_clear_open_context(filp);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_file_release);

/**
 * nfs_revalidate_file_size - Revalidate the file size
 * @inode: pointer to inode struct
 * @filp: pointer to struct file
 *
 * Revalidates the file length. This is basically a wrapper around
 * nfs_revalidate_inode() that takes into account the fact that we may
 * have cached writes (in which case we don't care about the server's
 * idea of what the file length is), or O_DIRECT (in which case we
 * shouldn't trust the cache).
 */
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
        struct nfs_server *server = NFS_SERVER(inode);

        if (filp->f_flags & O_DIRECT)
                goto force_reval;
        if (nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE))
                goto force_reval;
        return 0;
force_reval:
        return __nfs_revalidate_inode(server, inode);
}

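/*
 * Adjust the file offset. SEEK_END, SEEK_DATA and SEEK_HOLE depend on
 * an up-to-date file size, so revalidate it before handing off to
 * generic_file_llseek().
 */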
loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
        dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
                        filp, offset, whence);

        /*
         * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
         * the cached file length
         */
        if (whence != SEEK_SET && whence != SEEK_CUR) {
                struct inode *inode = filp->f_mapping->host;

                int retval = nfs_revalidate_file_size(inode, filp);
                if (retval < 0)
                        return (loff_t)retval;
        }

        return generic_file_llseek(filp, offset, whence);
}
EXPORT_SYMBOL_GPL(nfs_file_llseek);

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file_inode(file);

        dprintk("NFS: flush(%pD2)\n", file);

        nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
        if ((file->f_mode & FMODE_WRITE) == 0)
                return 0;

        /* Flush writes to the server and return any errors */
        return vfs_fsync(file, 0);
}

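/*
 * Read from a regular file. O_DIRECT reads bypass the page cache;
 * otherwise revalidate the mapping and use the generic page cache
 * read path.
 */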
ssize_t
nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t result;

        if (iocb->ki_flags & IOCB_DIRECT)
                return nfs_file_direct_read(iocb, to);

        dprintk("NFS: read(%pD2, %zu@%lu)\n",
                iocb->ki_filp,
                iov_iter_count(to), (unsigned long) iocb->ki_pos);

        nfs_start_io_read(inode);
        result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
        if (!result) {
                result = generic_file_read_iter(iocb, to);
                if (result > 0)
                        nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
        }
        nfs_end_io_read(inode);
        return result;
}
EXPORT_SYMBOL_GPL(nfs_file_read);

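/*
 * Map the file into memory, installing the NFS vm_operations so that
 * faults and page_mkwrite notifications are handled by NFS.
 */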
int
nfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        int status;

        dprintk("NFS: mmap(%pD2)\n", file);

        /* Note: generic_file_mmap() returns ENOSYS on nommu systems
         *       so we call that before revalidating the mapping
         */
        status = generic_file_mmap(file, vma);
        if (!status) {
                vma->vm_ops = &nfs_file_vm_ops;
                status = nfs_revalidate_mapping(inode, file->f_mapping);
        }
        return status;
}
EXPORT_SYMBOL_GPL(nfs_file_mmap);

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 *
 * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
 * disk, but it retrieves and clears ctx->error after synching, despite
 * the two being set at the same time in nfs_context_set_write_error().
 * This is because the former is used to notify the _next_ call to
 * nfs_file_write() that a write error occurred, and hence cause it to
 * fall back to doing a synchronous write.
 */
static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode *inode = file_inode(file);
        int do_resend, status;
        int ret = 0;

        dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);

        nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
        status = nfs_commit_inode(inode, FLUSH_SYNC);
        if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags)) {
                ret = xchg(&ctx->error, 0);
                if (ret)
                        goto out;
        }
        if (status < 0) {
                ret = status;
                goto out;
        }
        do_resend |= test_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
        if (do_resend)
                ret = -EAGAIN;
out:
        return ret;
}

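/*
 * fsync/fdatasync: write back and commit all dirty pages, retrying over
 * the whole file if the commit reports that writes need to be resent
 * (for example after a server reboot).
 */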
int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        int ret;
        struct inode *inode = file_inode(file);

        trace_nfs_fsync_enter(inode);

        do {
                struct nfs_open_context *ctx = nfs_file_open_context(file);
                ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
                if (test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags)) {
                        int ret2 = xchg(&ctx->error, 0);
                        if (ret2)
                                ret = ret2;
                }
                if (ret != 0)
                        break;
                ret = nfs_file_fsync_commit(file, datasync);
                if (!ret)
                        ret = pnfs_sync_inode(inode, !!datasync);
                /*
                 * If nfs_file_fsync_commit detected a server reboot, then
                 * resend all dirty pages that might have been covered by
                 * the NFS_CONTEXT_RESEND_WRITES flag
                 */
                start = 0;
                end = LLONG_MAX;
        } while (ret == -EAGAIN);

        trace_nfs_fsync_exit(inode, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);

/*
 * Decide whether a read/modify/write cycle may be more efficient
 * than a modify/write/read cycle when writing to a page in the
 * page cache.
 *
 * The modify/write/read cycle may occur if a page is read before
 * being completely filled by the writer.  In this situation, the
 * page must be completely written to stable storage on the server
 * before it can be refilled by reading in the page from the server.
 * This can lead to expensive, small, FILE_SYNC mode writes being
 * done.
 *
 * It may be more efficient to read the page first if the file is
 * open for reading in addition to writing, the page is not marked
 * as Uptodate, it is not dirty or waiting to be committed,
 * indicating that it was previously allocated and then modified,
 * that there were valid bytes of data in that range of the file,
 * and that the new data won't completely replace the old data in
 * that range of the file.
 */
static int nfs_want_read_modify_write(struct file *file, struct page *page,
                        loff_t pos, unsigned len)
{
        unsigned int pglen = nfs_page_length(page);
        unsigned int offset = pos & (PAGE_SIZE - 1);
        unsigned int end = offset + len;

        if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
                if (!PageUptodate(page))
                        return 1;
                return 0;
        }

        if ((file->f_mode & FMODE_READ) &&	/* open for read? */
            !PageUptodate(page) &&		/* Uptodate? */
            !PagePrivate(page) &&		/* i/o request already? */
            pglen &&				/* valid bytes of file? */
            (end < pglen || offset))		/* replace all valid bytes? */
                return 1;
        return 0;
}

/*
 * This does the "real" work of the write. We must allocate and lock the
 * page to be sent back to the generic routine, which then copies the
 * data from user space.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        int ret;
        pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        int once_thru = 0;

        dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
                file, mapping->host->i_ino, len, (long long) pos);

start:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        ret = nfs_flush_incompatible(file, page);
        if (ret) {
                unlock_page(page);
                put_page(page);
        } else if (!once_thru &&
                   nfs_want_read_modify_write(file, page, pos, len)) {
                once_thru = 1;
                ret = nfs_readpage(file, page);
                put_page(page);
                if (!ret)
                        goto start;
        }
        return ret;
}

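/*
 * Complete a buffered write to a page: zero any uninitialised parts of
 * the page, hand the newly written range to nfs_updatepage(), and flush
 * everything if the credential used for writeback is about to expire.
 */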
static int nfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        unsigned offset = pos & (PAGE_SIZE - 1);
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        int status;

        dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
                file, mapping->host->i_ino, len, (long long) pos);

        /*
         * Zero any uninitialised parts of the page, and then mark the page
         * as up to date if it turns out that we're extending the file.
         */
        if (!PageUptodate(page)) {
                unsigned pglen = nfs_page_length(page);
                unsigned end = offset + copied;

                if (pglen == 0) {
                        zero_user_segments(page, 0, offset,
                                        end, PAGE_SIZE);
                        SetPageUptodate(page);
                } else if (end >= pglen) {
                        zero_user_segment(page, end, PAGE_SIZE);
                        if (offset == 0)
                                SetPageUptodate(page);
                } else
                        zero_user_segment(page, pglen, PAGE_SIZE);
        }

        status = nfs_updatepage(file, page, offset, copied);

        unlock_page(page);
        put_page(page);

        if (status < 0)
                return status;
        NFS_I(mapping->host)->write_io += copied;

        if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
                status = nfs_wb_all(mapping->host);
                if (status < 0)
                        return status;
        }

        return copied;
}

/*
 * Partially or wholly invalidate a page
 * - Release the private state associated with a page if undergoing complete
 *   page invalidation
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 */
static void nfs_invalidate_page(struct page *page, unsigned int offset,
                                unsigned int length)
{
        dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
                 page, offset, length);

        if (offset != 0 || length < PAGE_SIZE)
                return;
        /* Cancel any unstarted writes on this page */
        nfs_wb_page_cancel(page_file_mapping(page)->host, page);

        nfs_fscache_invalidate_page(page, page->mapping->host);
}

/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
        dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

        /* If PagePrivate() is set, then the page is not freeable */
        if (PagePrivate(page))
                return 0;
        return nfs_fscache_release_page(page, gfp);
}

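/*
 * Tell the VM whether this page should be treated as dirty or as under
 * writeback when it decides whether page reclaim needs to wait.
 */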
static void nfs_check_dirty_writeback(struct page *page,
                                bool *dirty, bool *writeback)
{
        struct nfs_inode *nfsi;
        struct address_space *mapping = page_file_mapping(page);

        if (!mapping || PageSwapCache(page))
                return;

        /*
         * Check if an unstable page is currently being committed and
         * if so, have the VM treat it as if the page is under writeback
         * so it will not block due to pages that will shortly be freeable.
         */
        nfsi = NFS_I(mapping->host);
        if (atomic_read(&nfsi->commit_info.rpcs_out)) {
                *writeback = true;
                return;
        }

        /*
         * If PagePrivate() is set, then the page is not freeable and as the
         * inode is not being committed, it's not going to be cleaned in the
         * near future so treat it as dirty
         */
        if (PagePrivate(page))
                *dirty = true;
}

/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_page(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_inode *nfsi = NFS_I(inode);

        dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
                inode->i_ino, (long long)page_offset(page));

        nfs_fscache_wait_on_page_write(nfsi, page);
        return nfs_wb_page(inode, page);
}

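/*
 * swapon()/swapoff() on an NFS file: let the RPC layer know that this
 * client will carry swap traffic so it can adjust its memory allocation
 * behaviour accordingly.
 */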
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
                                                sector_t *span)
{
        struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

        *span = sis->pages;

        return rpc_clnt_swap_activate(clnt);
}

static void nfs_swap_deactivate(struct file *file)
{
        struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

        rpc_clnt_swap_deactivate(clnt);
}

const struct address_space_operations nfs_file_aops = {
        .readpage = nfs_readpage,
        .readpages = nfs_readpages,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .writepage = nfs_writepage,
        .writepages = nfs_writepages,
        .write_begin = nfs_write_begin,
        .write_end = nfs_write_end,
        .invalidatepage = nfs_invalidate_page,
        .releasepage = nfs_release_page,
        .direct_IO = nfs_direct_IO,
#ifdef CONFIG_MIGRATION
        .migratepage = nfs_migrate_page,
#endif
        .launder_page = nfs_launder_page,
        .is_dirty_writeback = nfs_check_dirty_writeback,
        .error_remove_page = generic_error_remove_page,
        .swap_activate = nfs_swap_activate,
        .swap_deactivate = nfs_swap_deactivate,
};

/*
 * Notification that a PTE pointing to an NFS page is about to be made
 * writable, implying that someone is about to modify the page through a
 * shared-writable mapping
 */
static int nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct file *filp = vmf->vma->vm_file;
        struct inode *inode = file_inode(filp);
        unsigned pagelen;
        int ret = VM_FAULT_NOPAGE;
        struct address_space *mapping;

        dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
                filp, filp->f_mapping->host->i_ino,
                (long long)page_offset(page));

        sb_start_pagefault(inode->i_sb);

        /* make sure the cache has finished storing the page */
        nfs_fscache_wait_on_page_write(NFS_I(inode), page);

        wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
                        nfs_wait_bit_killable, TASK_KILLABLE);

        lock_page(page);
        mapping = page_file_mapping(page);
        if (mapping != inode->i_mapping)
                goto out_unlock;

        wait_on_page_writeback(page);

        pagelen = nfs_page_length(page);
        if (pagelen == 0)
                goto out_unlock;

        ret = VM_FAULT_LOCKED;
        if (nfs_flush_incompatible(filp, page) == 0 &&
            nfs_updatepage(filp, page, 0, pagelen) == 0)
                goto out;

        ret = VM_FAULT_SIGBUS;
out_unlock:
        unlock_page(page);
out:
        sb_end_pagefault(inode->i_sb);
        return ret;
}

static const struct vm_operations_struct nfs_file_vm_ops = {
        .fault = filemap_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = nfs_vm_page_mkwrite,
};

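/*
 * Decide whether a buffered write must be followed by a flush: either a
 * previous write error is pending, or the credential used for writeback
 * is close to expiring.
 */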
static int nfs_need_check_write(struct file *filp, struct inode *inode)
{
        struct nfs_open_context *ctx;

        ctx = nfs_file_open_context(filp);
        if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags) ||
            nfs_ctx_key_to_expire(ctx, inode))
                return 1;
        return 0;
}

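/*
 * Write to a regular file. O_DIRECT writes are handed to
 * nfs_file_direct_write(); buffered writes go through the generic page
 * cache path and are synced afterwards if O_SYNC/O_DSYNC or a pending
 * write error requires it.
 */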
ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        unsigned long written = 0;
        ssize_t result;

        result = nfs_key_timeout_notify(file, inode);
        if (result)
                return result;

        if (iocb->ki_flags & IOCB_DIRECT)
                return nfs_file_direct_write(iocb, from);

        dprintk("NFS: write(%pD2, %zu@%Ld)\n",
                file, iov_iter_count(from), (long long) iocb->ki_pos);

        if (IS_SWAPFILE(inode))
                goto out_swapfile;
        /*
         * O_APPEND implies that we must revalidate the file length.
         */
        if (iocb->ki_flags & IOCB_APPEND) {
                result = nfs_revalidate_file_size(inode, file);
                if (result)
                        goto out;
        }
        if (iocb->ki_pos > i_size_read(inode))
                nfs_revalidate_mapping(inode, file->f_mapping);

        nfs_start_io_write(inode);
        result = generic_write_checks(iocb, from);
        if (result > 0) {
                current->backing_dev_info = inode_to_bdi(inode);
                result = generic_perform_write(file, from, iocb->ki_pos);
                current->backing_dev_info = NULL;
        }
        nfs_end_io_write(inode);
        if (result <= 0)
                goto out;

        written = result;
        iocb->ki_pos += written;
        result = generic_write_sync(iocb, written);
        if (result < 0)
                goto out;

        /* Return error values */
        if (nfs_need_check_write(file, inode)) {
                int err = vfs_fsync(file, 0);
                if (err < 0)
                        result = err;
        }
        nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
        return result;

out_swapfile:
        printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
        return -EBUSY;
}
EXPORT_SYMBOL_GPL(nfs_file_write);

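/*
 * Test for a conflicting lock (F_GETLK): check locally known locks
 * first, and only ask the server when we neither hold a read delegation
 * nor are using purely local locking.
 */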
static int
do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
        struct inode *inode = filp->f_mapping->host;
        int status = 0;
        unsigned int saved_type = fl->fl_type;

        /* Try local locking first */
        posix_test_lock(filp, fl);
        if (fl->fl_type != F_UNLCK) {
                /* found a conflict */
                goto out;
        }
        fl->fl_type = saved_type;

        if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
                goto out_noconflict;

        if (is_local)
                goto out_noconflict;

        status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
        return status;
out_noconflict:
        fl->fl_type = F_UNLCK;
        goto out;
}

static int
do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
        struct inode *inode = filp->f_mapping->host;
        struct nfs_lock_context *l_ctx;
        int status;

        /*
         * Flush all pending writes before doing anything
         * with locks..
         */
        vfs_fsync(filp, 0);

        l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
        if (!IS_ERR(l_ctx)) {
                status = nfs_iocounter_wait(l_ctx);
                nfs_put_lock_context(l_ctx);
                /* NOTE: special case
                 *      If we're signalled while cleaning up locks on process exit, we
                 *      still need to complete the unlock.
                 */
                if (status < 0 && !(fl->fl_flags & FL_CLOSE))
                        return status;
        }

        /*
         * Use local locking if mounted with "-onolock" or with appropriate
         * "-olocal_lock="
         */
        if (!is_local)
                status = NFS_PROTO(inode)->lock(filp, cmd, fl);
        else
                status = locks_lock_file_wait(filp, fl);
        return status;
}

static int
do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
        struct inode *inode = filp->f_mapping->host;
        int status;

        /*
         * Flush all pending writes before doing anything
         * with locks..
         */
        status = nfs_sync_mapping(filp->f_mapping);
        if (status != 0)
                goto out;

        /*
         * Use local locking if mounted with "-onolock" or with appropriate
         * "-olocal_lock="
         */
        if (!is_local)
                status = NFS_PROTO(inode)->lock(filp, cmd, fl);
        else
                status = locks_lock_file_wait(filp, fl);
        if (status < 0)
                goto out;

        /*
         * Invalidate cache to prevent missing any changes.  If
         * the file is mapped, clear the page cache as well so
         * those mappings will be loaded.
         *
         * This makes locking act as a cache coherency point.
         */
        nfs_sync_mapping(filp->f_mapping);
        if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
                nfs_zap_caches(inode);
                if (mapping_mapped(filp->f_mapping))
                        nfs_revalidate_mapping(inode, filp->f_mapping);
        }
out:
        return status;
}

/*
 * Lock a (portion of) a file
 */
int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        struct inode *inode = filp->f_mapping->host;
        int ret = -ENOLCK;
        int is_local = 0;

        dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
                        filp, fl->fl_type, fl->fl_flags,
                        (long long)fl->fl_start, (long long)fl->fl_end);

        nfs_inc_stats(inode, NFSIOS_VFSLOCK);

        /* No mandatory locks over NFS */
        if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
                goto out_err;

        if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
                is_local = 1;

        if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
                ret = NFS_PROTO(inode)->lock_check_bounds(fl);
                if (ret < 0)
                        goto out_err;
        }

        if (IS_GETLK(cmd))
                ret = do_getlk(filp, cmd, fl, is_local);
        else if (fl->fl_type == F_UNLCK)
                ret = do_unlk(filp, cmd, fl, is_local);
        else
                ret = do_setlk(filp, cmd, fl, is_local);
out_err:
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_lock);

/*
 * Apply or release an flock()-style lock on the whole file
 */
int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
        struct inode *inode = filp->f_mapping->host;
        int is_local = 0;

        dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
                        filp, fl->fl_type, fl->fl_flags);

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;

        /*
         * The NFSv4 protocol doesn't support LOCK_MAND, which is not part of
         * any standard. In principle we might be able to support LOCK_MAND
         * on NFSv2/3 since NLMv3/4 support DOS share modes, but for now the
         * NFS code is not set up for it.
         */
        if (fl->fl_type & LOCK_MAND)
                return -EINVAL;

        if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
                is_local = 1;

        /* We're simulating flock() locks using posix locks on the server */
        if (fl->fl_type == F_UNLCK)
                return do_unlk(filp, cmd, fl, is_local);
        return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);

const struct file_operations nfs_file_operations = {
        .llseek = nfs_file_llseek,
        .read_iter = nfs_file_read,
        .write_iter = nfs_file_write,
        .mmap = nfs_file_mmap,
        .open = nfs_file_open,
        .flush = nfs_file_flush,
        .release = nfs_file_release,
        .fsync = nfs_file_fsync,
        .lock = nfs_lock,
        .flock = nfs_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .check_flags = nfs_check_flags,
        .setlease = simple_nosetlease,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);