/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"
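
/* Note on the dirty-region tracking used throughout this file: the portion
 * of a page that has been written is recorded in page->private as a
 * (from, to) byte-offset pair.  "from" lives in the low-order bits and "to"
 * is shifted up by AFS_PRIV_SHIFT (both constants come from the AFS
 * headers).  For example, a write covering bytes 0x100-0x800 of a page is
 * encoded and decoded as:
 *
 *	priv = ((unsigned long)0x800 << AFS_PRIV_SHIFT) | 0x100;
 *	f = priv & AFS_PRIV_MAX;	// 0x100 (from)
 *	t = priv >> AFS_PRIV_SHIFT;	// 0x800 (to)
 */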

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	/* Allocate a one-page read request; the page pointer array is
	 * attached to the tail of the request.
	 */
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret == -ENOENT) {
		_debug("got NOENT from server - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	_leave(" = %d", ret);
	return ret;
}
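
/* Note that afs_write_begin() below uses this to read the entire page when
 * a write covers only part of a page that is not yet uptodate: the bytes
 * outside the written region must hold valid data before the page can be
 * marked uptodate.
 */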

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		if (to < f || from > t)
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}
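
/* A worked example of the merge logic above, assuming a 4KB page: if the
 * page already carries a dirty region f=0x000..t=0x200 and the new write
 * spans from=0x200 to=0x300, the regions abut, so they merge to
 * 0x000..0x300.  If instead the new write began at from=0x400, a hole of
 * untracked bytes would separate the two regions, so the old region is
 * flushed out first via flush_conflicting_write and the new write then
 * starts a fresh region.
 */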

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	/* The i_size check is repeated under wb_lock so that a concurrent
	 * extension of the file can't be overwritten with a smaller size.
	 */
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
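
/* Of the two failure paths above, afs_redirty_pages() is the recoverable
 * one: the pages stay dirty and will be retried on a later writeback pass.
 * afs_kill_pages() is terminal: the pages are marked in error and removed
 * from the page cache, so the written data is discarded.  Which one gets
 * used depends on the error returned by the store operation (see the switch
 * in afs_write_back_from_locked_page() below).
 */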

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}
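
/* A note on the retry loop above: writeback may outlive the file descriptor
 * that dirtied the data, so the vnode keeps a list of keys belonging to
 * processes that have written to the file.  If the server rejects one key
 * with a permission or key-validity error (EACCES, EPERM, ENOKEY, EKEY*),
 * the store is retried with the next key on the list rather than failing
 * the writeback outright.
 */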

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page || to < PAGE_SIZE)
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
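
/* Example of the gathering pass above, assuming 4KB pages: if the primary
 * page (say index 5) is dirty from offset 0x800 up to PAGE_SIZE and pages 6
 * and 7 are dirty from offset 0, the three pages are batched into a single
 * FS.StoreData call covering bytes 0x5800 to 0x8000 of the file.  A
 * candidate page whose dirty region does not start at offset 0 (f != 0)
 * would leave a hole, so it ends the scan and is written back separately.
 */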

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}
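
/* In the range_cyclic case above, writeback resumes where the previous pass
 * stopped and wraps around: for instance, with writeback_index at page 100,
 * pages 100 onwards are scanned first, then the region from page 0 back up
 * to page 100, after which writeback_index is updated so the next pass
 * carries on from there.
 */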

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}
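
/* The three loops that walk a page range above (kill, redirty and
 * completion) all follow the same pattern: gather up to PAGEVEC_SIZE pages
 * at a time with find_get_pages_contig(), process the batch, release it,
 * and repeat until the whole range has been covered.
 */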

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
	_enter("");

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	if (lock_page_killable(vmf->page) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}
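
/* Unlike the write() path, faults through a shared mapping give no hint of
 * how much of the page will be modified, which is why the whole page
 * (from 0 to PAGE_SIZE) is recorded as dirty here rather than a sub-range
 * as in afs_write_begin().
 */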

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		/* Default to writing back the whole page unless a dirty
		 * sub-region is recorded in page->private.
		 */
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}