fs/afs/write.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 loff_t start, loff_t end, loff_t *_next,
                                 bool max_one_loop);

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
                               loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        return fscache_dirty_folio(mapping, folio,
                                afs_vnode_cache(AFS_FS_I(mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
        if (caching)
                folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

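/*
 * Dirty-range bookkeeping, in outline: each dirty folio's private word packs
 * the dirtied byte range [from, to) plus an "mmapped" flag, written and read
 * via the afs_folio_dirty*() helpers in internal.h.  Writeback then only has
 * to store the recorded span of each folio rather than the whole folio (the
 * exact bit layout lives in internal.h and isn't relied upon here).
 */
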
/*
 * Flush out a conflicting write.  This may extend the write to the surrounding
 * pages if also dirty and contiguous to the conflicting region.
 */
static int afs_flush_conflicting_write(struct address_space *mapping,
                                       struct folio *folio)
{
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_ALL,
                .nr_to_write    = LONG_MAX,
                .range_start    = folio_pos(folio),
                .range_end      = LLONG_MAX,
        };
        loff_t next;

        return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
                                     &next, true);
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len,
                    struct page **_page, void **fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct folio *folio;
        unsigned long priv;
        unsigned f, from;
        unsigned t, to;
        pgoff_t index;
        int ret;

        _enter("{%llx:%llu},%llx,%x",
               vnode->fid.vid, vnode->fid.vnode, pos, len);

        /* Prefetch area to be written into the cache if we're caching this
         * file.  We need to do this before we get a lock on the page in case
         * there's more than one writer competing for the same cache block.
         */
        ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
        if (ret < 0)
                return ret;

        index = folio_index(folio);
        from = pos - index * PAGE_SIZE;
        to = from + len;

try_again:
        /* See if this page is already partially written in a way that we can
         * merge the new write with.
         */
        if (folio_test_private(folio)) {
                priv = (unsigned long)folio_get_private(folio);
                f = afs_folio_dirty_from(folio, priv);
                t = afs_folio_dirty_to(folio, priv);
                ASSERTCMP(f, <=, t);

                if (folio_test_writeback(folio)) {
                        trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
                        folio_unlock(folio);
                        goto wait_for_writeback;
                }
                /* If the file is being filled locally, allow inter-write
                 * spaces to be merged into writes.  If it's not, only write
                 * back what the user gives us.
                 */
                if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
                    (to < f || from > t))
                        goto flush_conflicting_write;
        }

        *_page = folio_file_page(folio, pos / PAGE_SIZE);
        _leave(" = 0");
        return 0;

        /* The previous write and this write aren't adjacent or overlapping, so
         * flush the page out.
         */
flush_conflicting_write:
        trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
        folio_unlock(folio);

        ret = afs_flush_conflicting_write(mapping, folio);
        if (ret < 0)
                goto error;

wait_for_writeback:
        ret = folio_wait_writeback_killable(folio);
        if (ret < 0)
                goto error;

        ret = folio_lock_killable(folio);
        if (ret < 0)
                goto error;
        goto try_again;

error:
        folio_put(folio);
        _leave(" = %d", ret);
        return ret;
}

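/*
 * For context, a buffered write reaches the pair above from
 * generic_file_write_iter() via generic_perform_write(), roughly:
 *
 *	afs_write_begin(file, mapping, pos, len, &page, &fsdata);
 *	...copy user data into the page...
 *	afs_write_end(file, mapping, pos, len, copied, page, fsdata);
 *
 * (a sketch of the generic VFS sequence, not a verbatim call chain)
 */
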
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *subpage, void *fsdata)
{
        struct folio *folio = page_folio(subpage);
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        unsigned long priv;
        unsigned int f, from = offset_in_folio(folio, pos);
        unsigned int t, to = from + copied;
        loff_t i_size, write_end_pos;

        _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

        if (!folio_test_uptodate(folio)) {
                if (copied < len) {
                        copied = 0;
                        goto out;
                }

                folio_mark_uptodate(folio);
        }

        if (copied == 0)
                goto out;

        write_end_pos = pos + copied;

        i_size = i_size_read(&vnode->netfs.inode);
        if (write_end_pos > i_size) {
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->netfs.inode);
                if (write_end_pos > i_size)
                        afs_set_i_size(vnode, write_end_pos);
                write_sequnlock(&vnode->cb_lock);
                fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
        }

        if (folio_test_private(folio)) {
                priv = (unsigned long)folio_get_private(folio);
                f = afs_folio_dirty_from(folio, priv);
                t = afs_folio_dirty_to(folio, priv);
                if (from < f)
                        f = from;
                if (to > t)
                        t = to;
                priv = afs_folio_dirty(folio, f, t);
                folio_change_private(folio, (void *)priv);
                trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
        } else {
                priv = afs_folio_dirty(folio, from, to);
                folio_attach_private(folio, (void *)priv);
                trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
        }

        if (folio_mark_dirty(folio))
                _debug("dirtied %lx", folio_index(folio));

out:
        folio_unlock(folio);
        folio_put(folio);
        return copied;
}

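/*
 * To illustrate the merging above: if a folio already records dirty bytes
 * [1024, 2048) and the new write copied [1536, 3072), the stored range is
 * widened to [1024, 3072) - effectively f = min(f, from), t = max(t, to).
 */
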
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
                           loff_t start, loff_t len)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct folio *folio;
        pgoff_t index = start / PAGE_SIZE;
        pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

        _enter("{%llx:%llu},%llx @%llx",
               vnode->fid.vid, vnode->fid.vnode, len, start);

        do {
                _debug("kill %lx (to %lx)", index, last);

                folio = filemap_get_folio(mapping, index);
                if (IS_ERR(folio)) {
                        next = index + 1;
                        continue;
                }

                next = folio_next_index(folio);

                folio_clear_uptodate(folio);
                folio_end_writeback(folio);
                folio_lock(folio);
                generic_error_remove_folio(mapping, folio);
                folio_unlock(folio);
                folio_put(folio);

        } while (index = next, index <= last);

        _leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
                              struct address_space *mapping,
                              loff_t start, loff_t len)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct folio *folio;
        pgoff_t index = start / PAGE_SIZE;
        pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

        _enter("{%llx:%llu},%llx @%llx",
               vnode->fid.vid, vnode->fid.vnode, len, start);

        do {
                _debug("redirty %llx @%llx", len, start);

                folio = filemap_get_folio(mapping, index);
                if (IS_ERR(folio)) {
                        next = index + 1;
                        continue;
                }

                next = index + folio_nr_pages(folio);
                folio_redirty_for_writepage(wbc, folio);
                folio_end_writeback(folio);
                folio_put(folio);
        } while (index = next, index <= last);

        _leave("");
}

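/*
 * afs_kill_pages() and afs_redirty_pages() are the two failure dispositions
 * used by afs_write_back_from_locked_folio() below: retryable failures
 * (ENOSPC, quota, key/permission or network-reset errors) redirty the pages
 * so a later writeback pass can have another go, whereas fatal errors (EIO,
 * EROFS, ENOENT, ...) discard the pages and mark the mapping in error.
 */
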
/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
        struct address_space *mapping = vnode->netfs.inode.i_mapping;
        struct folio *folio;
        pgoff_t end;

        XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

        _enter("{%llx:%llu},{%x @%llx}",
               vnode->fid.vid, vnode->fid.vnode, len, start);

        rcu_read_lock();

        end = (start + len - 1) / PAGE_SIZE;
        xas_for_each(&xas, folio, end) {
                if (!folio_test_writeback(folio)) {
                        kdebug("bad %x @%llx page %lx %lx",
                               len, start, folio_index(folio), end);
                        ASSERT(folio_test_writeback(folio));
                }

                trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
                folio_detach_private(folio);
                folio_end_writeback(folio);
        }

        rcu_read_unlock();

        afs_prune_wb_keys(vnode);
        _leave("");
}

/*
 * Find a key to use for the writeback.  We cache the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
                                 struct afs_wb_key **_wbk)
{
        struct afs_wb_key *wbk = NULL;
        struct list_head *p;
        int ret = -ENOKEY, ret2;

        spin_lock(&vnode->wb_lock);
        if (*_wbk)
                p = (*_wbk)->vnode_link.next;
        else
                p = vnode->wb_keys.next;

        while (p != &vnode->wb_keys) {
                wbk = list_entry(p, struct afs_wb_key, vnode_link);
                _debug("wbk %u", key_serial(wbk->key));
                ret2 = key_validate(wbk->key);
                if (ret2 == 0) {
                        refcount_inc(&wbk->usage);
                        _debug("USE WB KEY %u", key_serial(wbk->key));
                        break;
                }

                wbk = NULL;
                if (ret == -ENOKEY)
                        ret = ret2;
                p = p->next;
        }

        spin_unlock(&vnode->wb_lock);
        if (*_wbk)
                afs_put_wb_key(*_wbk);
        *_wbk = wbk;
        return wbk ? 0 : ret;
}

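/*
 * In outline, a caller rotates through the cached keys until one works:
 *
 *	struct afs_wb_key *wbk = NULL;
 *	ret = afs_get_writeback_key(vnode, &wbk);	// first usable key
 *	...issue the RPC; on a permission failure...
 *	ret = afs_get_writeback_key(vnode, &wbk);	// resume scan after *_wbk
 *
 * (a sketch of the pattern used by afs_store_data() below; passing the
 * previous key back in resumes the scan from that point in vnode->wb_keys)
 */
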
static void afs_store_data_success(struct afs_operation *op)
{
        struct afs_vnode *vnode = op->file[0].vnode;

        op->ctime = op->file[0].scb.status.mtime_client;
        afs_vnode_commit_status(op, &op->file[0]);
        if (!afs_op_error(op)) {
                if (!op->store.laundering)
                        afs_pages_written_back(vnode, op->store.pos, op->store.size);
                afs_stat_v(vnode, n_stores);
                atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
        }
}

static const struct afs_operation_ops afs_store_data_operation = {
        .issue_afs_rpc  = afs_fs_store_data,
        .issue_yfs_rpc  = yfs_fs_store_data,
        .success        = afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
                          bool laundering)
{
        struct afs_operation *op;
        struct afs_wb_key *wbk = NULL;
        loff_t size = iov_iter_count(iter);
        int ret = -ENOKEY;

        _enter("%s{%llx:%llu.%u},%llx,%llx",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               size, pos);

        ret = afs_get_writeback_key(vnode, &wbk);
        if (ret) {
                _leave(" = %d [no keys]", ret);
                return ret;
        }

        op = afs_alloc_operation(wbk->key, vnode->volume);
        if (IS_ERR(op)) {
                afs_put_wb_key(wbk);
                return -ENOMEM;
        }

        afs_op_set_vnode(op, 0, vnode);
        op->file[0].dv_delta = 1;
        op->file[0].modification = true;
        op->store.pos = pos;
        op->store.size = size;
        op->store.laundering = laundering;
        op->flags |= AFS_OPERATION_UNINTR;
        op->ops = &afs_store_data_operation;

try_next_key:
        afs_begin_vnode_operation(op);

        op->store.write_iter = iter;
        op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
        op->mtime = inode_get_mtime(&vnode->netfs.inode);

        afs_wait_for_operation(op);

        switch (afs_op_error(op)) {
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                _debug("next");

                ret = afs_get_writeback_key(vnode, &wbk);
                if (ret == 0) {
                        key_put(op->key);
                        op->key = key_get(wbk->key);
                        goto try_next_key;
                }
                break;
        }

        afs_put_wb_key(wbk);
        _leave(" = %d", afs_op_error(op));
        return afs_put_operation(op);
}

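/*
 * Note that op->store.i_size above is the file size the server should adopt:
 * a store that runs past the remote EOF implicitly extends the file, so the
 * size sent with the RPC is the larger of the write end and the remembered
 * remote size.
 */
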
/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
                                 struct afs_vnode *vnode,
                                 long *_count,
                                 loff_t start,
                                 loff_t max_len,
                                 bool new_content,
                                 bool caching,
                                 unsigned int *_len)
{
        struct folio_batch fbatch;
        struct folio *folio;
        unsigned long priv;
        unsigned int psize, filler = 0;
        unsigned int f, t;
        loff_t len = *_len;
        pgoff_t index = (start + len) / PAGE_SIZE;
        bool stop = true;
        unsigned int i;

        XA_STATE(xas, &mapping->i_pages, index);
        folio_batch_init(&fbatch);

        do {
                /* Firstly, we gather up a batch of contiguous dirty pages
                 * under the RCU read lock - but we can't clear the dirty flags
                 * there if any of those pages are mapped.
                 */
                rcu_read_lock();

                xas_for_each(&xas, folio, ULONG_MAX) {
                        stop = true;
                        if (xas_retry(&xas, folio))
                                continue;
                        if (xa_is_value(folio))
                                break;
                        if (folio_index(folio) != index)
                                break;

                        if (!folio_try_get_rcu(folio)) {
                                xas_reset(&xas);
                                continue;
                        }

                        /* Has the page moved or been split? */
                        if (unlikely(folio != xas_reload(&xas))) {
                                folio_put(folio);
                                break;
                        }

                        if (!folio_trylock(folio)) {
                                folio_put(folio);
                                break;
                        }
                        if (!folio_test_dirty(folio) ||
                            folio_test_writeback(folio) ||
                            folio_test_fscache(folio)) {
                                folio_unlock(folio);
                                folio_put(folio);
                                break;
                        }

                        psize = folio_size(folio);
                        priv = (unsigned long)folio_get_private(folio);
                        f = afs_folio_dirty_from(folio, priv);
                        t = afs_folio_dirty_to(folio, priv);
                        if (f != 0 && !new_content) {
                                folio_unlock(folio);
                                folio_put(folio);
                                break;
                        }

                        len += filler + t;
                        filler = psize - t;
                        if (len >= max_len || *_count <= 0)
                                stop = true;
                        else if (t == psize || new_content)
                                stop = false;

                        index += folio_nr_pages(folio);
                        if (!folio_batch_add(&fbatch, folio))
                                break;
                        if (stop)
                                break;
                }

                if (!stop)
                        xas_pause(&xas);
                rcu_read_unlock();

                /* Now, if we obtained any folios, we can shift them to being
                 * writable and mark them for caching.
                 */
                if (!folio_batch_count(&fbatch))
                        break;

                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        folio = fbatch.folios[i];
                        trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

                        if (!folio_clear_dirty_for_io(folio))
                                BUG();
                        folio_start_writeback(folio);
                        afs_folio_start_fscache(caching, folio);

                        *_count -= folio_nr_pages(folio);
                        folio_unlock(folio);
                }

                folio_batch_release(&fbatch);
                cond_resched();
        } while (!stop);

        *_len = len;
}

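/*
 * To illustrate the len/filler arithmetic above with 4KiB folios: a folio
 * dirty to t = 4096 adds 4096 bytes and leaves filler at 0; a folio dirty
 * only to t = 1000 adds 1000 bytes and leaves filler at 3096.  If a further
 * folio is then merged (only permitted when the file content is all new),
 * those 3096 filler bytes of zeros are included so that the stored region
 * stays contiguous.
 */
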
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
                                                struct writeback_control *wbc,
                                                struct folio *folio,
                                                loff_t start, loff_t end)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct iov_iter iter;
        unsigned long priv;
        unsigned int offset, to, len, max_len;
        loff_t i_size = i_size_read(&vnode->netfs.inode);
        bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
        bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
        long count = wbc->nr_to_write;
        int ret;

        _enter(",%lx,%llx-%llx", folio_index(folio), start, end);

        folio_start_writeback(folio);
        afs_folio_start_fscache(caching, folio);

        count -= folio_nr_pages(folio);

        /* Find all consecutive lockable dirty pages that have contiguous
         * written regions, stopping when we find a page that is not
         * immediately lockable, is not dirty or is missing, or we reach the
         * end of the range.
         */
        priv = (unsigned long)folio_get_private(folio);
        offset = afs_folio_dirty_from(folio, priv);
        to = afs_folio_dirty_to(folio, priv);
        trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

        len = to - offset;
        start += offset;
        if (start < i_size) {
                /* Trim the write to the EOF; the extra data is ignored.  Also
                 * put an upper limit on the size of a single storedata op.
                 */
                max_len = 65536 * 4096;
                max_len = min_t(unsigned long long, max_len, end - start + 1);
                max_len = min_t(unsigned long long, max_len, i_size - start);

                if (len < max_len &&
                    (to == folio_size(folio) || new_content))
                        afs_extend_writeback(mapping, vnode, &count,
                                             start, max_len, new_content,
                                             caching, &len);
                len = min_t(loff_t, len, max_len);
        }

        /* We now have a contiguous set of dirty pages, each with writeback
         * set; the first page is still locked at this point, but all the rest
         * have been unlocked.
         */
        folio_unlock(folio);

        if (start < i_size) {
                _debug("write back %x @%llx [%llx]", len, start, i_size);

                /* Speculatively write to the cache.  We have to fix this up
                 * later if the store fails.
                 */
                afs_write_to_cache(vnode, start, len, i_size, caching);

                iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
                ret = afs_store_data(vnode, &iter, start, false);
        } else {
                _debug("write discard %x @%llx [%llx]", len, start, i_size);

                /* The dirty region was entirely beyond the EOF. */
                fscache_clear_page_bits(mapping, start, len, caching);
                afs_pages_written_back(vnode, start, len);
                ret = 0;
        }

        switch (ret) {
        case 0:
                wbc->nr_to_write = count;
                ret = len;
                break;

        default:
                pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
                fallthrough;
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
        case -ENETRESET:
                afs_redirty_pages(wbc, mapping, start, len);
                mapping_set_error(mapping, ret);
                break;

        case -EDQUOT:
        case -ENOSPC:
                afs_redirty_pages(wbc, mapping, start, len);
                mapping_set_error(mapping, -ENOSPC);
                break;

        case -EROFS:
        case -EIO:
        case -EREMOTEIO:
        case -EFBIG:
        case -ENOENT:
        case -ENOMEDIUM:
        case -ENXIO:
                trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
                afs_kill_pages(mapping, start, len);
                mapping_set_error(mapping, ret);
                break;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 loff_t start, loff_t end, loff_t *_next,
                                 bool max_one_loop)
{
        struct folio *folio;
        struct folio_batch fbatch;
        ssize_t ret;
        unsigned int i;
        int n, skips = 0;

        _enter("%llx,%llx,", start, end);
        folio_batch_init(&fbatch);

        do {
                pgoff_t index = start / PAGE_SIZE;

                n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
                                        PAGECACHE_TAG_DIRTY, &fbatch);

                if (!n)
                        break;
                for (i = 0; i < n; i++) {
                        folio = fbatch.folios[i];
                        start = folio_pos(folio); /* May regress with THPs */

                        _debug("wback %lx", folio_index(folio));

                        /* At this point we hold neither the i_pages lock nor the
                         * page lock: the page may be truncated or invalidated
                         * (changing page->mapping to NULL), or even swizzled
                         * back from swapper_space to tmpfs file mapping
                         */
try_again:
                        if (wbc->sync_mode != WB_SYNC_NONE) {
                                ret = folio_lock_killable(folio);
                                if (ret < 0) {
                                        folio_batch_release(&fbatch);
                                        return ret;
                                }
                        } else {
                                if (!folio_trylock(folio))
                                        continue;
                        }

                        if (folio->mapping != mapping ||
                            !folio_test_dirty(folio)) {
                                start += folio_size(folio);
                                folio_unlock(folio);
                                continue;
                        }

                        if (folio_test_writeback(folio) ||
                            folio_test_fscache(folio)) {
                                folio_unlock(folio);
                                if (wbc->sync_mode != WB_SYNC_NONE) {
                                        folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
                                        folio_wait_fscache(folio);
#endif
                                        goto try_again;
                                }

                                start += folio_size(folio);
                                if (wbc->sync_mode == WB_SYNC_NONE) {
                                        if (skips >= 5 || need_resched()) {
                                                *_next = start;
                                                folio_batch_release(&fbatch);
                                                _leave(" = 0 [%llx]", *_next);
                                                return 0;
                                        }
                                        skips++;
                                }
                                continue;
                        }

                        if (!folio_clear_dirty_for_io(folio))
                                BUG();
                        ret = afs_write_back_from_locked_folio(mapping, wbc,
                                        folio, start, end);
                        if (ret < 0) {
                                _leave(" = %zd", ret);
                                folio_batch_release(&fbatch);
                                return ret;
                        }

                        start += ret;
                }

                folio_batch_release(&fbatch);
                cond_resched();
        } while (wbc->nr_to_write > 0);

        *_next = start;
        _leave(" = 0 [%llx]", *_next);
        return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        loff_t start, next;
        int ret;

        _enter("");

        /* We have to be careful as we can end up racing with setattr()
         * truncating the pagecache since the caller doesn't take a lock here
         * to prevent it.
         */
        if (wbc->sync_mode == WB_SYNC_ALL)
                down_read(&vnode->validate_lock);
        else if (!down_read_trylock(&vnode->validate_lock))
                return 0;

        if (wbc->range_cyclic) {
                start = mapping->writeback_index * PAGE_SIZE;
                ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
                                            &next, false);
                if (ret == 0) {
                        mapping->writeback_index = next / PAGE_SIZE;
                        if (start > 0 && wbc->nr_to_write > 0) {
                                ret = afs_writepages_region(mapping, wbc, 0,
                                                            start, &next, false);
                                if (ret == 0)
                                        mapping->writeback_index =
                                                next / PAGE_SIZE;
                        }
                }
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
                                            &next, false);
                if (wbc->nr_to_write > 0 && ret == 0)
                        mapping->writeback_index = next / PAGE_SIZE;
        } else {
                ret = afs_writepages_region(mapping, wbc,
                                            wbc->range_start, wbc->range_end,
                                            &next, false);
        }

        up_read(&vnode->validate_lock);
        _leave(" = %d", ret);
        return ret;
}

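/*
 * In the range_cyclic case above, the scan starts at writeback_index, runs
 * to the end of the file, then wraps to cover [0, start) if wbc->nr_to_write
 * still has quota left, and finally parks writeback_index where the scan
 * stopped so that the next pass resumes from there.
 */
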
/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        struct afs_file *af = iocb->ki_filp->private_data;
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%llx:%llu},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->netfs.inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = afs_validate(vnode, af->key);
        if (result < 0)
                return result;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct afs_file *af = file->private_data;
        int ret;

        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        ret = afs_validate(vnode, af->key);
        if (ret < 0)
                return ret;

        return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
        struct folio *folio = page_folio(vmf->page);
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_file *af = file->private_data;
        unsigned long priv;
        vm_fault_t ret = VM_FAULT_RETRY;

        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

        afs_validate(vnode, af->key);

        sb_start_pagefault(inode->i_sb);

        /* Wait for the page to be written to the cache before we allow it to
         * be modified.  We then assume the entire page will need writing back.
         */
#ifdef CONFIG_AFS_FSCACHE
        if (folio_test_fscache(folio) &&
            folio_wait_fscache_killable(folio) < 0)
                goto out;
#endif

        if (folio_wait_writeback_killable(folio))
                goto out;

        if (folio_lock_killable(folio) < 0)
                goto out;

        /* We mustn't change folio->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
         * need to redirty the page if there's a problem.
         */
        if (folio_wait_writeback_killable(folio) < 0) {
                folio_unlock(folio);
                goto out;
        }

        priv = afs_folio_dirty(folio, 0, folio_size(folio));
        priv = afs_folio_dirty_mmapped(priv);
        if (folio_test_private(folio)) {
                folio_change_private(folio, (void *)priv);
                trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
        } else {
                folio_attach_private(folio, (void *)priv);
                trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
        }
        file_update_time(file);

        ret = VM_FAULT_LOCKED;
out:
        sb_end_pagefault(inode->i_sb);
        return ret;
}

/*
 * Prune the keys cached for writeback.  Unused keys are unlinked under
 * vnode->wb_lock and then released once the lock has been dropped.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
        LIST_HEAD(graveyard);
        struct afs_wb_key *wbk, *tmp;

        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);

        if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
            !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
                }
        }

        spin_unlock(&vnode->wb_lock);

        while (!list_empty(&graveyard)) {
                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
                list_del(&wbk->vnode_link);
                afs_put_wb_key(wbk);
        }
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_folio(struct folio *folio)
{
        struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
        struct iov_iter iter;
        struct bio_vec bv;
        unsigned long priv;
        unsigned int f, t;
        int ret = 0;

        _enter("{%lx}", folio->index);

        priv = (unsigned long)folio_get_private(folio);
        if (folio_clear_dirty_for_io(folio)) {
                f = 0;
                t = folio_size(folio);
                if (folio_test_private(folio)) {
                        f = afs_folio_dirty_from(folio, priv);
                        t = afs_folio_dirty_to(folio, priv);
                }

                bvec_set_folio(&bv, folio, t - f, f);
                iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, bv.bv_len);

                trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
                ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
        }

        trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
        folio_detach_private(folio);
        folio_wait_fscache(folio);
        return ret;
}

/*
 * Deal with the completion of writing the data to the cache.
 */
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
                                    bool was_async)
{
        struct afs_vnode *vnode = priv;

        if (IS_ERR_VALUE(transferred_or_error) &&
            transferred_or_error != -ENOBUFS)
                afs_invalidate_cache(vnode, 0);
}

/*
 * Save the write to the cache also.
 */
static void afs_write_to_cache(struct afs_vnode *vnode,
                               loff_t start, size_t len, loff_t i_size,
                               bool caching)
{
        fscache_write_to_cache(afs_vnode_cache(vnode),
                               vnode->netfs.inode.i_mapping, start, len, i_size,
                               afs_write_to_cache_done, vnode, caching);
}