fs/afs/write.c
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

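	/* Allocate a single-page read request; the extra space after the
	 * struct holds the one entry of its pages[] array.
	 */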
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	atomic_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);
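	/* The bottom bits record where the dirty region starts ("from") and
	 * the top bits where it ends ("to").  As an illustration only: a
	 * write covering bytes 0x100-0x8ff of an otherwise clean page would
	 * be recorded below as
	 *
	 *	priv = (0x900UL << AFS_PRIV_SHIFT) | 0x100;
	 */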

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		if (to < f || from > t)
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}
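	/* For instance, if the page already records f=0x200,t=0x400, a new
	 * write of 0x380-0x500 overlaps and widens the record to
	 * f=0x200,t=0x500, whereas a write at 0x600-0x700 would leave a gap
	 * and so takes the flush path above instead.
	 */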

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

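	/* Advance the file size if this write extends it.  The size is
	 * checked again under wb_lock as a racing writer may have extended
	 * it further between the unlocked read and taking the lock.
	 */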
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			/* The page must be unlocked again before the pagevec
			 * drops its reference below.
			 */
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

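	/* No usable key was found; hand back the first validation error we
	 * encountered, or -ENOKEY if the list was empty.
	 */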
	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
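	/* Following pages can only be appended to the run below while the
	 * accumulated region ends at a page boundary and each of them is
	 * dirty from byte 0, so that the stored extent stays contiguous.
	 */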
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page || to < PAGE_SIZE)
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			if (to != PAGE_SIZE)
				break;
			page = pages[loop];
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
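		/* The error has already been dealt with: the pages were
		 * redirtied or killed and the error recorded against the
		 * mapping, so don't hand it back to the VM here.
		 */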
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
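		/* Resume from wherever the previous cyclic pass left off,
		 * then wrap round to the start of the file if there is still
		 * write quota left.
		 */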
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
	_enter("");

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	if (lock_page_killable(vmf->page) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  The lock is taken here, so the caller
 * must not hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
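		/* Nothing is dirty and nothing is being written back, so any
		 * key whose only remaining reference is this list can go.
		 */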
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

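		/* Write back just the region recorded as dirty, defaulting to
		 * the whole page if no record was kept.
		 */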
		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}
864 }