netfs: Update i_blocks when write committed to pagecache
fs/netfs/buffered_write.c [linux-block.git]
// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * The determined write method.  Adjust netfs_folio_traces if this is changed.
 */
enum netfs_how_to_modify {
	NETFS_FOLIO_IS_UPTODATE,	/* Folio is uptodate already */
	NETFS_JUST_PREFETCH,		/* We have to read the folio anyway */
	NETFS_WHOLE_FOLIO_MODIFY,	/* We're going to overwrite the whole folio */
	NETFS_MODIFY_AND_CLEAR,		/* We can assume there is no data to be downloaded. */
	NETFS_STREAMING_WRITE,		/* Store incomplete data in non-uptodate page. */
	NETFS_STREAMING_WRITE_CONT,	/* Continue streaming write. */
	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
};

static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);

static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	if (netfs_group && !folio_get_private(folio))
		folio_attach_private(folio, netfs_get_group(netfs_group));
}

#if IS_ENABLED(CONFIG_FSCACHE)
static void netfs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void netfs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

/*
 * Decide how we should modify a folio.  We might be attempting to do
 * write-streaming, in which case we don't want to do a local RMW cycle if we
 * can avoid it.  If we're doing local caching or content crypto, we award
 * that priority over avoiding RMW.  If the file is open readably, then we
 * also assume that we may want to read what we wrote.
 */
static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
						    struct file *file,
						    struct folio *folio,
						    void *netfs_group,
						    size_t flen,
						    size_t offset,
						    size_t len,
						    bool maybe_trouble)
{
	struct netfs_folio *finfo = netfs_folio_info(folio);
	loff_t pos = folio_file_pos(folio);

	_enter("");

	if (netfs_folio_group(folio) != netfs_group)
		return NETFS_FLUSH_CONTENT;

	if (folio_test_uptodate(folio))
		return NETFS_FOLIO_IS_UPTODATE;

	if (pos >= ctx->zero_point)
		return NETFS_MODIFY_AND_CLEAR;

	if (!maybe_trouble && offset == 0 && len >= flen)
		return NETFS_WHOLE_FOLIO_MODIFY;

	if (file->f_mode & FMODE_READ)
		goto no_write_streaming;
	if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
		goto no_write_streaming;

	if (netfs_is_cache_enabled(ctx)) {
		/* We don't want to get a streaming write on a file that loses
		 * caching service temporarily because the backing store got
		 * culled.
		 */
		if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
			set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags);
		goto no_write_streaming;
	}

	if (!finfo)
		return NETFS_STREAMING_WRITE;

	/* We can continue a streaming write only if it follows on directly
	 * from the previous one.  If it overlaps, we must flush lest we
	 * suffer a partial copy and disjoint dirty regions.
	 */
	if (offset == finfo->dirty_offset + finfo->dirty_len)
		return NETFS_STREAMING_WRITE_CONT;
	return NETFS_FLUSH_CONTENT;

no_write_streaming:
	if (finfo) {
		netfs_stat(&netfs_n_wh_wstream_conflict);
		return NETFS_FLUSH_CONTENT;
	}
	return NETFS_JUST_PREFETCH;
}
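
/* Worked example (added note, not in the original source): a 100-byte write
 * at offset 200 into a folio that is not yet uptodate and lies below the
 * zero point, on a descriptor opened O_WRONLY, with no local cache and no
 * conflicting group, yields NETFS_STREAMING_WRITE: the partial data is
 * stored and tracked by a netfs_folio record rather than triggering a
 * read-modify-write.  The same write on an O_RDWR descriptor would instead
 * yield NETFS_JUST_PREFETCH.
 */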

/*
 * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}
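
/* Note for readers (not in the original file): fgf_set_order() encodes a
 * preferred folio order into the FGP flags.  The size hint is the offset
 * within the first page plus the number of bytes still to copy, so, for
 * instance, a 6KiB write starting 1KiB into a page hints at a 7KiB
 * allocation and __filemap_get_folio() may then return an order-1 (8KiB)
 * folio on a 4KiB-page system, if large folios are enabled for the mapping.
 */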

/*
 * Update i_size and estimate the update to i_blocks to reflect the additional
 * data written into the pagecache until we can find out from the server what
 * the values actually are.
 */
static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
				loff_t i_size, loff_t pos, size_t copied)
{
	blkcnt_t add;
	size_t gap;

	if (ctx->ops->update_i_size) {
		ctx->ops->update_i_size(inode, pos);
		return;
	}

	i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
	fscache_update_cookie(ctx->cache, NULL, &pos);
#endif

	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
	if (copied > gap) {
		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);

		inode->i_blocks = min_t(blkcnt_t,
					DIV_ROUND_UP(pos, SECTOR_SIZE),
					inode->i_blocks + add);
	}
}
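
/* Worked example (illustrative only): with SECTOR_SIZE == 512, an old i_size
 * of 1000 and a 600-byte append (so pos becomes 1600): gap = 512 - (1000 & 511)
 * = 24; copied (600) exceeds the gap, so add = DIV_ROUND_UP(576, 512) = 2 new
 * 512-byte blocks.  i_blocks is raised by 2, but clamped to
 * DIV_ROUND_UP(1600, 512) = 4, pending the authoritative values from the
 * server.
 */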

/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * Copy data into pagecache pages attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty pages are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified.  Dirty pages may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_sync	= true,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	};
	struct netfs_io_request *wreq = NULL;
	struct netfs_folio *finfo;
	struct folio *folio;
	enum netfs_how_to_modify howto;
	enum netfs_folio_trace trace;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0 : BDP_ASYNC;
	ssize_t written = 0, ret, ret2;
	loff_t i_size, pos = iocb->ki_pos, from, to;
	size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	bool maybe_trouble = false;

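	/* Editor's note (not in the original): for O_SYNC/O_DSYNC writes, or
	 * if the filesystem has asked for write-through caching, flush what
	 * is already dirty in the target range and set up a write-through
	 * request so data is pushed to the server as it is copied into the
	 * pagecache, rather than waiting for later writeback.
	 */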
	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
	    ) {
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);

		ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		if (ret < 0) {
			wbc_detach_inode(&wbc);
			goto out;
		}

		wreq = netfs_begin_writethrough(iocb, iter->count);
		if (IS_ERR(wreq)) {
			wbc_detach_inode(&wbc);
			ret = PTR_ERR(wreq);
			wreq = NULL;
			goto out;
		}
		if (!is_sync_kiocb(iocb))
			wreq->iocb = iocb;
		wreq->cleanup = netfs_cleanup_buffered_write;
	}

	do {
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */

		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		ret = -EFAULT;
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		flen = folio_size(folio);
		offset = pos & (flen - 1);
		part = min_t(size_t, flen - offset, part);

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* See if we need to prefetch the area we're going to modify.
		 * We need to do this before we get a lock on the folio in case
		 * there's more than one writer competing for the same cache
		 * block.
		 */
		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
					    flen, offset, part, maybe_trouble);
		_debug("howto %u", howto);
		switch (howto) {
		case NETFS_JUST_PREFETCH:
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			break;
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_WHOLE_FOLIO_MODIFY:
		case NETFS_STREAMING_WRITE_CONT:
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, 0, offset);
			break;
		case NETFS_STREAMING_WRITE:
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			break;
		case NETFS_FLUSH_CONTENT:
			trace_netfs_folio(folio, netfs_flush_content);
			from = folio_pos(folio);
			to = from + folio_size(folio) - 1;
			folio_unlock(folio);
			folio_put(folio);
			ret = filemap_write_and_wait_range(mapping, from, to);
			if (ret < 0)
				goto error_folio_unlock;
			continue;
		}

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, part, iter);

		flush_dcache_folio(folio);

		/* Deal with a (partially) failed copy */
		if (copied == 0) {
			ret = -EFAULT;
			goto error_folio_unlock;
		}

		trace = (enum netfs_folio_trace)howto;
		switch (howto) {
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_JUST_PREFETCH:
			netfs_set_group(folio, netfs_group);
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, offset + copied, flen);
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_WHOLE_FOLIO_MODIFY:
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				goto retry;
			}
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_STREAMING_WRITE:
			if (offset == 0 && copied == flen) {
				netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace = netfs_streaming_filled_page;
				break;
			}
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
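			/* Editor's note (not in the original): the folio's
			 * private pointer normally holds the netfs_group; for
			 * a streaming write it instead holds this netfs_folio
			 * record with the bottom bit (NETFS_FOLIO_INFO) set to
			 * distinguish the two cases.  netfs_folio_info() and
			 * netfs_folio_group() decode the tag.
			 */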
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			break;
		case NETFS_STREAMING_WRITE_CONT:
			finfo = netfs_folio_info(folio);
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace = netfs_streaming_cont_filled_page;
			}
			break;
		default:
			WARN(true, "Unexpected modify type %u ix=%lx\n",
			     howto, folio->index);
			ret = -EIO;
			goto error_folio_unlock;
		}

		trace_netfs_folio(folio, trace);

		/* Update the inode size if we moved the EOF marker */
		pos += copied;
		i_size = i_size_read(inode);
		if (pos > i_size)
			netfs_update_i_size(ctx, inode, i_size, pos, copied);
		written += copied;

		if (likely(!wreq)) {
			folio_mark_dirty(folio);
		} else {
			if (folio_test_dirty(folio))
				/* Sigh.  mmap. */
				folio_clear_dirty_for_io(folio);
			/* We make multiple writes to the folio... */
			if (!folio_test_writeback(folio)) {
				folio_wait_fscache(folio);
				folio_start_writeback(folio);
				folio_start_fscache(folio);
				if (wreq->iter.count == 0)
					trace_netfs_folio(folio, netfs_folio_trace_wthru);
				else
					trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
			}
			netfs_advance_writethrough(wreq, copied,
						   offset + copied == flen);
		}
	retry:
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (unlikely(wreq)) {
		ret2 = netfs_end_writethrough(wreq, iocb);
		wbc_detach_inode(&wbc);
		if (ret2 == -EIOCBQUEUED)
			return ret2;
		if (ret == 0)
			ret = ret2;
	}

	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);

/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already. The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);

/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);
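
/* Illustrative sketch (not part of this file): a network filesystem using
 * these helpers would typically point its file_operations straight at them,
 * e.g. for a hypothetical "myfs":
 *
 *	const struct file_operations myfs_file_operations = {
 *		.read_iter	= netfs_file_read_iter,
 *		.write_iter	= netfs_file_write_iter,
 *		...
 *	};
 *
 * A filesystem that needs a dirty-page grouping (such as ceph snap contexts)
 * would instead call netfs_buffered_write_iter_locked() from its own
 * ->write_iter handler, passing the appropriate netfs_group.
 */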

/*
 * Notification that a previously read-only page is about to become writable.
 * Note that the caller indicates a single page of a multipage folio.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	vm_fault_t ret = VM_FAULT_RETRY;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
		goto out;
	}

	if (netfs_folio_group(folio) != netfs_group) {
		folio_unlock(folio);
		err = filemap_fdatawait_range(inode->i_mapping,
					      folio_pos(folio),
					      folio_pos(folio) + folio_size(folio));
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(netfs_page_mkwrite);
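
/* Illustrative sketch (assumption, not from this file): a filesystem that
 * doesn't use dirty-page groups could expose this through a thin wrapper in
 * its vm_operations_struct:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return netfs_page_mkwrite(vmf, NULL);
 *	}
 */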

/*
 * Kill all the pages in the given range
 */
static void netfs_kill_pages(struct address_space *mapping,
			     loff_t start, loff_t len)
{
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("%llx-%llx", start, start + len - 1);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (IS_ERR(folio)) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		trace_netfs_folio(folio, netfs_folio_trace_kill);
		folio_clear_uptodate(folio);
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_folio(mapping, folio);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void netfs_redirty_pages(struct address_space *mapping,
				loff_t start, loff_t len)
{
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("%llx-%llx", start, start + len - 1);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (IS_ERR(folio)) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);
		trace_netfs_folio(folio, netfs_folio_trace_redirty);
		filemap_dirty_folio(mapping, folio);
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	balance_dirty_pages_ratelimited(mapping);

	_leave("");
}

/*
 * Completion of write to server
 */
static void netfs_pages_written_back(struct netfs_io_request *wreq)
{
	struct address_space *mapping = wreq->mapping;
	struct netfs_folio *finfo;
	struct netfs_group *group = NULL;
	struct folio *folio;
	pgoff_t last;
	int gcount = 0;

	XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE);

	_enter("%llx-%llx", wreq->start, wreq->start + wreq->len);

	rcu_read_lock();

	last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, last) {
		WARN(!folio_test_writeback(folio),
		     "bad %zx @%llx page %lx %lx\n",
		     wreq->len, wreq->start, folio->index, last);

		if ((finfo = netfs_folio_info(folio))) {
			/* Streaming writes cannot be redirtied whilst under
			 * writeback, so discard the streaming record.
			 */
			folio_detach_private(folio);
			group = finfo->netfs_group;
			gcount++;
			trace_netfs_folio(folio, netfs_folio_trace_clear_s);
			kfree(finfo);
		} else if ((group = netfs_folio_group(folio))) {
			/* Need to detach the group pointer if the page didn't
			 * get redirtied.  If it has been redirtied, then it
			 * must be within the same group.
			 */
			if (folio_test_dirty(folio)) {
				trace_netfs_folio(folio, netfs_folio_trace_redirtied);
				goto end_wb;
			}
			if (folio_trylock(folio)) {
				if (!folio_test_dirty(folio)) {
					folio_detach_private(folio);
					gcount++;
					trace_netfs_folio(folio, netfs_folio_trace_clear_g);
				} else {
					trace_netfs_folio(folio, netfs_folio_trace_redirtied);
				}
				folio_unlock(folio);
				goto end_wb;
			}

			xas_pause(&xas);
			rcu_read_unlock();
			folio_lock(folio);
			if (!folio_test_dirty(folio)) {
				folio_detach_private(folio);
				gcount++;
				trace_netfs_folio(folio, netfs_folio_trace_clear_g);
			} else {
				trace_netfs_folio(folio, netfs_folio_trace_redirtied);
			}
			folio_unlock(folio);
			rcu_read_lock();
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_clear);
		}
	end_wb:
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		xas_advance(&xas, folio_next_index(folio) - 1);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();
	netfs_put_group_many(group, gcount);
	_leave("");
}

/*
 * Deal with the disposition of the folios that are under writeback to close
 * out the operation.
 */
static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq)
{
	struct address_space *mapping = wreq->mapping;

	_enter("");

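	/* Editor's note (not in the original): errors that are likely to be
	 * transient or recoverable (authentication/key problems, quota or
	 * space exhaustion, a reset connection) leave the pages dirty so the
	 * write can be retried, whereas hard failures discard the pagecache
	 * copies.
	 */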
	switch (wreq->error) {
	case 0:
		netfs_pages_written_back(wreq);
		break;

	default:
		pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
	case -ENETRESET:
	case -EDQUOT:
	case -ENOSPC:
		netfs_redirty_pages(mapping, wreq->start, wreq->len);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		netfs_kill_pages(mapping, wreq->start, wreq->len);
		break;
	}

	if (wreq->error)
		mapping_set_error(mapping, wreq->error);
	if (wreq->netfs_ops->done)
		wreq->netfs_ops->done(wreq);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void netfs_extend_writeback(struct address_space *mapping,
				   struct netfs_group *group,
				   struct xa_state *xas,
				   long *_count,
				   loff_t start,
				   loff_t max_len,
				   bool caching,
				   size_t *_len,
				   size_t *_top)
{
	struct netfs_folio *finfo;
	struct folio_batch fbatch;
	struct folio *folio;
	unsigned int i;
	pgoff_t index = (start + *_len) / PAGE_SIZE;
	size_t len;
	void *priv;
	bool stop = true;

	folio_batch_init(&fbatch);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio->index != index) {
				xas_reset(xas);
				break;
			}

			if (!folio_try_get_rcu(folio)) {
				xas_reset(xas);
				continue;
			}

			/* Has the folio moved or been split? */
			if (unlikely(folio != xas_reload(xas))) {
				folio_put(folio);
				xas_reset(xas);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				xas_reset(xas);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				xas_reset(xas);
				break;
			}

			stop = false;
			len = folio_size(folio);
			priv = folio_get_private(folio);
			if ((const struct netfs_group *)priv != group) {
				stop = true;
				finfo = netfs_folio_info(folio);
				if (finfo->netfs_group != group ||
				    finfo->dirty_offset > 0) {
					folio_unlock(folio);
					folio_put(folio);
					xas_reset(xas);
					break;
				}
				len = finfo->dirty_len;
			}

			*_top += folio_size(folio);
			index += folio_nr_pages(folio);
			*_count -= folio_nr_pages(folio);
			*_len += len;
			if (*_len >= max_len || *_count <= 0)
				stop = true;

			if (!folio_batch_add(&fbatch, folio))
				break;
			if (stop)
				break;
		}

		xas_pause(xas);
		rcu_read_unlock();

		/* Now, if we obtained any folios, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!folio_batch_count(&fbatch))
			break;

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];
			trace_netfs_folio(folio, netfs_folio_trace_store_plus);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			folio_start_writeback(folio);
			netfs_folio_start_fscache(caching, folio);
			folio_unlock(folio);
		}

		folio_batch_release(&fbatch);
		cond_resched();
	} while (!stop);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
						  struct writeback_control *wbc,
						  struct netfs_group *group,
						  struct xa_state *xas,
						  struct folio *folio,
						  unsigned long long start,
						  unsigned long long end)
{
	struct netfs_io_request *wreq;
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	unsigned long long i_size = i_size_read(&ctx->inode);
	size_t len, max_len;
	bool caching = netfs_is_cache_enabled(ctx);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);

	wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
				   NETFS_WRITEBACK);
	if (IS_ERR(wreq)) {
		folio_unlock(folio);
		return PTR_ERR(wreq);
	}

	if (!folio_clear_dirty_for_io(folio))
		BUG();
	folio_start_writeback(folio);
	netfs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	trace_netfs_folio(folio, netfs_folio_trace_store);

	len = wreq->len;
	finfo = netfs_folio_info(folio);
	if (finfo) {
		start += finfo->dirty_offset;
		if (finfo->dirty_offset + finfo->dirty_len != len) {
			len = finfo->dirty_len;
			goto cant_expand;
		}
		len = finfo->dirty_len;
	}

	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len)
			netfs_extend_writeback(mapping, group, xas, &count, start,
					       max_len, caching, &len, &wreq->upper_len);
	}

cant_expand:
	len = min_t(unsigned long long, len, i_size - start);

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);
	wreq->start = start;
	wreq->len = len;

	if (start < i_size) {
		_debug("write back %zx @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */
		wreq->cleanup = netfs_cleanup_buffered_write;

		iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
				wreq->upper_len);
		__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
		ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
		if (ret == 0 || ret == -EIOCBQUEUED)
			wbc->nr_to_write -= len / PAGE_SIZE;
	} else {
		_debug("write discard %zx @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(mapping, start, len, caching);
		netfs_pages_written_back(wreq);
		ret = 0;
	}

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = 1");
	return 1;
}

/*
 * Write a region of pages back to the server
 */
static ssize_t netfs_writepages_begin(struct address_space *mapping,
				      struct writeback_control *wbc,
				      struct netfs_group *group,
				      struct xa_state *xas,
				      unsigned long long *_start,
				      unsigned long long end)
{
	const struct netfs_folio *finfo;
	struct folio *folio;
	unsigned long long start = *_start;
	ssize_t ret;
	void *priv;
	int skips = 0;

	_enter("%llx,%llx,", start, end);

search_again:
	/* Find the first dirty page in the group. */
	rcu_read_lock();

	for (;;) {
		folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
		if (xas_retry(xas, folio) || xa_is_value(folio))
			continue;
		if (!folio)
			break;

		if (!folio_try_get_rcu(folio)) {
			xas_reset(xas);
			continue;
		}

		if (unlikely(folio != xas_reload(xas))) {
			folio_put(folio);
			xas_reset(xas);
			continue;
		}

		/* Skip any dirty folio that's not in the group of interest. */
		priv = folio_get_private(folio);
		if ((const struct netfs_group *)priv != group) {
			finfo = netfs_folio_info(folio);
			if (finfo->netfs_group != group) {
				folio_put(folio);
				continue;
			}
		}

		xas_pause(xas);
		break;
	}
	rcu_read_unlock();
	if (!folio)
		return 0;

	start = folio_pos(folio); /* May regress with THPs */

	_debug("wback %lx", folio->index);

	/* At this point we hold neither the i_pages lock nor the page lock:
	 * the page may be truncated or invalidated (changing page->mapping to
	 * NULL), or even swizzled back from swapper_space to tmpfs file
	 * mapping
	 */
lock_again:
	if (wbc->sync_mode != WB_SYNC_NONE) {
		ret = folio_lock_killable(folio);
		if (ret < 0)
			return ret;
	} else {
		if (!folio_trylock(folio))
			goto search_again;
	}

	if (folio->mapping != mapping ||
	    !folio_test_dirty(folio)) {
		start += folio_size(folio);
		folio_unlock(folio);
		goto search_again;
	}

	if (folio_test_writeback(folio) ||
	    folio_test_fscache(folio)) {
		folio_unlock(folio);
		if (wbc->sync_mode != WB_SYNC_NONE) {
			folio_wait_writeback(folio);
#ifdef CONFIG_FSCACHE
			folio_wait_fscache(folio);
#endif
			goto lock_again;
		}

		start += folio_size(folio);
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (skips >= 5 || need_resched()) {
				ret = 0;
				goto out;
			}
			skips++;
		}
		goto search_again;
	}

	ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas,
						 folio, start, end);
out:
	if (ret > 0)
		*_start = start + ret;
	_leave(" = %zd [%llx]", ret, *_start);
	return ret;
}

/*
 * Write a region of pages back to the server
 */
static int netfs_writepages_region(struct address_space *mapping,
				   struct writeback_control *wbc,
				   struct netfs_group *group,
				   unsigned long long *_start,
				   unsigned long long end)
{
	ssize_t ret;

	XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);

	do {
		ret = netfs_writepages_begin(mapping, wbc, group, &xas,
					     _start, end);
		if (ret > 0 && wbc->nr_to_write > 0)
			cond_resched();
	} while (ret > 0 && wbc->nr_to_write > 0);

	return ret > 0 ? 0 : ret;
}

/*
 * write some of the pending data back to the server
 */
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct netfs_group *group = NULL;
	loff_t start, end;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */

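	/* Editor's note (not in the original): for cyclic writeback this does
	 * two passes - from the saved writeback_index to the end of the file,
	 * then from the start of the file up to where the first pass began -
	 * so that every dirty page gets a turn.
	 */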
	if (wbc->range_cyclic && mapping->writeback_index) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = netfs_writepages_region(mapping, wbc, group,
					      &start, LLONG_MAX);
		if (ret < 0)
			goto out;

		if (wbc->nr_to_write <= 0) {
			mapping->writeback_index = start / PAGE_SIZE;
			goto out;
		}

		start = 0;
		end = mapping->writeback_index * PAGE_SIZE;
		mapping->writeback_index = 0;
		ret = netfs_writepages_region(mapping, wbc, group, &start, end);
		if (ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		start = 0;
		ret = netfs_writepages_region(mapping, wbc, group,
					      &start, LLONG_MAX);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else {
		start = wbc->range_start;
		ret = netfs_writepages_region(mapping, wbc, group,
					      &start, wbc->range_end);
	}

out:
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_writepages);
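
/* Illustrative sketch (assumption, not from this file): these hooks are
 * normally wired into the filesystem's address_space_operations, e.g.:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.writepages	= netfs_writepages,
 *		.launder_folio	= netfs_launder_folio,
 *		...
 *	};
 */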

/*
 * Deal with the disposition of a laundered folio.
 */
static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
{
	if (wreq->error) {
		pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
		mapping_set_error(wreq->mapping, wreq->error);
	}
}

/**
 * netfs_launder_folio - Clean up a dirty folio that's being invalidated
 * @folio: The folio to clean
 *
 * This is called to write back a folio that's being invalidated when an inode
 * is getting torn down.  Ideally, writepages would be used instead.
 */
int netfs_launder_folio(struct folio *folio)
{
	struct netfs_io_request *wreq;
	struct address_space *mapping = folio->mapping;
	struct netfs_folio *finfo = netfs_folio_info(folio);
	struct netfs_group *group = netfs_folio_group(folio);
	struct bio_vec bvec;
	unsigned long long i_size = i_size_read(mapping->host);
	unsigned long long start = folio_pos(folio);
	size_t offset = 0, len;
	int ret = 0;

	if (finfo) {
		offset = finfo->dirty_offset;
		start += offset;
		len = finfo->dirty_len;
	} else {
		len = folio_size(folio);
	}
	len = min_t(unsigned long long, len, i_size - start);

	wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
	if (IS_ERR(wreq)) {
		ret = PTR_ERR(wreq);
		goto out;
	}

	if (!folio_clear_dirty_for_io(folio))
		goto out_put;

	trace_netfs_folio(folio, netfs_folio_trace_launder);

	_debug("launder %llx-%llx", start, start + len - 1);

	/* Speculatively write to the cache.  We have to fix this up later if
	 * the store fails.
	 */
	wreq->cleanup = netfs_cleanup_launder_folio;

	bvec_set_folio(&bvec, folio, len, offset);
	iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);

out_put:
	folio_detach_private(folio);
	netfs_put_group(group);
	kfree(finfo);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
out:
	folio_wait_fscache(folio);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_launder_folio);