// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level write support.
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
 * Determined write method. Adjust netfs_folio_traces if this is changed.
enum netfs_how_to_modify {
	NETFS_FOLIO_IS_UPTODATE,	/* Folio is uptodate already */
	NETFS_JUST_PREFETCH,		/* We have to read the folio anyway */
	NETFS_WHOLE_FOLIO_MODIFY,	/* We're going to overwrite the whole folio */
	NETFS_MODIFY_AND_CLEAR,		/* We can assume there is no data to be downloaded. */
	NETFS_STREAMING_WRITE,		/* Store incomplete data in non-uptodate page. */
	NETFS_STREAMING_WRITE_CONT,	/* Continue streaming write. */
	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
	if (netfs_group && !folio_get_private(folio))
		folio_attach_private(folio, netfs_get_group(netfs_group));
#if IS_ENABLED(CONFIG_FSCACHE)
static void netfs_folio_start_fscache(bool caching, struct folio *folio)
		folio_start_fscache(folio);
static void netfs_folio_start_fscache(bool caching, struct folio *folio)
 * Decide how we should modify a folio. We might be attempting to do
 * write-streaming, in which case we don't want to do a local RMW cycle if we
 * can avoid it. If we're doing local caching or content crypto, we award that
 * priority over avoiding RMW. If the file is open readably, then we also
 * assume that we may want to read what we wrote.
static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
	struct netfs_folio *finfo = netfs_folio_info(folio);
	loff_t pos = folio_file_pos(folio);
	if (netfs_folio_group(folio) != netfs_group)
		return NETFS_FLUSH_CONTENT;
	if (folio_test_uptodate(folio))
		return NETFS_FOLIO_IS_UPTODATE;
	if (pos >= ctx->zero_point)
		return NETFS_MODIFY_AND_CLEAR;
	if (!maybe_trouble && offset == 0 && len >= flen)
		return NETFS_WHOLE_FOLIO_MODIFY;
	if (file->f_mode & FMODE_READ)
		goto no_write_streaming;
	if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
		goto no_write_streaming;
	if (netfs_is_cache_enabled(ctx)) {
		/* We don't want to get a streaming write on a file that loses
		 * caching service temporarily because the backing store got
		if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
			set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags);
		goto no_write_streaming;
		return NETFS_STREAMING_WRITE;
	/* We can continue a streaming write only if it continues on from the
	 * previous. If it overlaps, we must flush lest we suffer a partial
	 * copy and disjoint dirty regions.
	if (offset == finfo->dirty_offset + finfo->dirty_len)
		return NETFS_STREAMING_WRITE_CONT;
	return NETFS_FLUSH_CONTENT;
		netfs_stat(&netfs_n_wh_wstream_conflict);
		return NETFS_FLUSH_CONTENT;
	return NETFS_JUST_PREFETCH;
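
/* A rough worked example of the decision above (illustrative only, not an
 * exhaustive description): a short write into a non-uptodate folio on a
 * write-only file with no cache enabled and no existing netfs_folio record
 * typically comes back as NETFS_STREAMING_WRITE, whereas the same write on a
 * file that is also open for reading takes the no_write_streaming path and
 * comes back as NETFS_JUST_PREFETCH, forcing a read-modify-write.
 */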
 * Grab a folio for writing and lock it. Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;
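
	/* Where the mapping supports large folios, pass an allocation-order
	 * hint via the FGP flags so that a single folio can cover as much of
	 * this write as possible.
	 */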
	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);
	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 * Copy data into pagecache pages attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 * Dirty pages are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified. Dirty pages may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	struct netfs_io_request *wreq = NULL;
	struct netfs_folio *finfo;
	enum netfs_how_to_modify howto;
	enum netfs_folio_trace trace;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0 : BDP_ASYNC;
	ssize_t written = 0, ret;
	loff_t i_size, pos = iocb->ki_pos, from, to;
	size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	bool maybe_trouble = false;
	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
		if (pos < i_size_read(inode)) {
			ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);
		wreq = netfs_begin_writethrough(iocb, iter->count);
			wbc_detach_inode(&wbc);
		if (!is_sync_kiocb(iocb))
		wreq->cleanup = netfs_cleanup_buffered_write;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */
		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));
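
		/* Note that offset and part are provisional at this point,
		 * computed against the largest folio the pagecache will
		 * allocate; they are recalculated against the folio actually
		 * obtained below.
		 */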
		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
		folio = netfs_grab_folio_for_write(mapping, pos, part);
			ret = PTR_ERR(folio);
		flen = folio_size(folio);
		offset = pos & (flen - 1);
		part = min_t(size_t, flen - offset, part);
		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		/* See if we need to prefetch the area we're going to modify.
		 * We need to do this before we get a lock on the folio in case
		 * there's more than one writer competing for the same cache
		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
					    flen, offset, part, maybe_trouble);
		_debug("howto %u", howto);
		case NETFS_JUST_PREFETCH:
			ret = netfs_prefetch_for_write(file, folio, offset, part);
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_WHOLE_FOLIO_MODIFY:
		case NETFS_STREAMING_WRITE_CONT:
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, 0, offset);
		case NETFS_STREAMING_WRITE:
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
		case NETFS_FLUSH_CONTENT:
			trace_netfs_folio(folio, netfs_flush_content);
			from = folio_pos(folio);
			to = from + folio_size(folio) - 1;
			ret = filemap_write_and_wait_range(mapping, from, to);
				goto error_folio_unlock;
		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);
		copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
		flush_dcache_folio(folio);
		/* Deal with a (partially) failed copy */
			goto error_folio_unlock;
		trace = (enum netfs_folio_trace)howto;
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_JUST_PREFETCH:
			netfs_set_group(folio, netfs_group);
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, offset + copied, flen);
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
		case NETFS_WHOLE_FOLIO_MODIFY:
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
		case NETFS_STREAMING_WRITE:
			if (offset == 0 && copied == flen) {
				netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace = netfs_streaming_filled_page;
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
				iov_iter_revert(iter, copied);
				goto error_folio_unlock;
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
		case NETFS_STREAMING_WRITE_CONT:
			finfo = netfs_folio_info(folio);
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				trace = netfs_streaming_cont_filled_page;
			WARN(true, "Unexpected modify type %u ix=%lx\n",
			     howto, folio->index);
			goto error_folio_unlock;
		trace_netfs_folio(folio, trace);
		/* Update the inode size if we moved the EOF marker */
		i_size = i_size_read(inode);
			if (ctx->ops->update_i_size) {
				ctx->ops->update_i_size(inode, pos);
				i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
				fscache_update_cookie(ctx->cache, NULL, &pos);
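
		/* In the ordinary buffered path (no wreq) the folio is simply
		 * marked dirty below; in write-through mode it is pushed
		 * straight into writeback and fed to
		 * netfs_advance_writethrough().
		 */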
			folio_mark_dirty(folio);
			if (folio_test_dirty(folio))
				folio_clear_dirty_for_io(folio);
			/* We make multiple writes to the folio... */
			if (!folio_test_writeback(folio)) {
				folio_wait_fscache(folio);
				folio_start_writeback(folio);
				folio_start_fscache(folio);
				if (wreq->iter.count == 0)
					trace_netfs_folio(folio, netfs_folio_trace_wthru);
					trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
			netfs_advance_writethrough(wreq, copied,
						   offset + copied == flen);
	} while (iov_iter_count(iter));
	if (unlikely(wreq)) {
		ret = netfs_end_writethrough(wreq, iocb);
		wbc_detach_inode(&wbc);
		if (ret == -EIOCBQUEUED)
	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;
EXPORT_SYMBOL(netfs_perform_write);
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already. The caller is also responsible for doing
 * any necessary syncing afterwards.
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
	struct file *file = iocb->ki_filp;
	trace_netfs_write_iter(iocb, from);
	ret = file_remove_privs(file);
	ret = file_update_time(file);
	return netfs_perform_write(iocb, from, netfs_group);
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
	if (!iov_iter_count(from))
	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);
	ret = netfs_start_io_write(inode);
	ret = generic_write_checks(iocb, from);
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
		ret = generic_write_sync(iocb, ret);
EXPORT_SYMBOL(netfs_file_write_iter);
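
/* A filesystem built on netfslib can normally use the helper above directly
 * as its ->write_iter method.  A minimal sketch (the "myfs" name is
 * illustrative and not part of netfslib):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.write_iter	= netfs_file_write_iter,
 *		...
 *	};
 */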
 * Notification that a previously read-only page is about to become writable.
 * Note that the caller indicates a single page of a multipage folio.
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	vm_fault_t ret = VM_FAULT_RETRY;
	_enter("%lx", folio->index);
	sb_start_pagefault(inode->i_sb);
	if (folio_wait_writeback_killable(folio))
	if (folio_lock_killable(folio) < 0)
	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
	if (netfs_folio_group(folio) != netfs_group) {
		err = filemap_fdatawait_range(inode->i_mapping,
					      folio_pos(folio) + folio_size(folio));
			ret = VM_FAULT_RETRY;
			ret = VM_FAULT_SIGBUS;
	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	ret = VM_FAULT_LOCKED;
	sb_end_pagefault(inode->i_sb);
EXPORT_SYMBOL(netfs_page_mkwrite);
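
/* Filesystems typically reach netfs_page_mkwrite() from a thin wrapper in
 * their vm_operations_struct.  An illustrative sketch (names outside netfslib
 * are hypothetical):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return netfs_page_mkwrite(vmf, NULL);
 *	}
 *
 * where NULL is passed by filesystems that don't use dirty-page groups.
 */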
 * Kill all the pages in the given range
static void netfs_kill_pages(struct address_space *mapping,
			     loff_t start, loff_t len)
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
	_enter("%llx-%llx", start, start + len - 1);
		_debug("kill %lx (to %lx)", index, last);
		folio = filemap_get_folio(mapping, index);
		next = folio_next_index(folio);
		trace_netfs_folio(folio, netfs_folio_trace_kill);
		folio_clear_uptodate(folio);
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		folio_end_writeback(folio);
		generic_error_remove_folio(mapping, folio);
	} while (index = next, index <= last);
 * Redirty all the pages in a given range.
static void netfs_redirty_pages(struct address_space *mapping,
				loff_t start, loff_t len)
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
	_enter("%llx-%llx", start, start + len - 1);
		_debug("redirty %llx @%llx", len, start);
		folio = filemap_get_folio(mapping, index);
		next = folio_next_index(folio);
		trace_netfs_folio(folio, netfs_folio_trace_redirty);
		filemap_dirty_folio(mapping, folio);
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		folio_end_writeback(folio);
	} while (index = next, index <= last);
	balance_dirty_pages_ratelimited(mapping);
 * Completion of write to server
static void netfs_pages_written_back(struct netfs_io_request *wreq)
	struct address_space *mapping = wreq->mapping;
	struct netfs_folio *finfo;
	struct netfs_group *group = NULL;
	XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE);
	_enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
	last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, last) {
		WARN(!folio_test_writeback(folio),
		     "bad %zx @%llx page %lx %lx\n",
		     wreq->len, wreq->start, folio->index, last);
		if ((finfo = netfs_folio_info(folio))) {
			/* Streaming writes cannot be redirtied whilst under
			 * writeback, so discard the streaming record.
			folio_detach_private(folio);
			group = finfo->netfs_group;
			trace_netfs_folio(folio, netfs_folio_trace_clear_s);
		} else if ((group = netfs_folio_group(folio))) {
			/* Need to detach the group pointer if the page didn't
			 * get redirtied. If it has been redirtied, then it
			 * must be within the same group.
			if (folio_test_dirty(folio)) {
				trace_netfs_folio(folio, netfs_folio_trace_redirtied);
			if (folio_trylock(folio)) {
				if (!folio_test_dirty(folio)) {
					folio_detach_private(folio);
					trace_netfs_folio(folio, netfs_folio_trace_clear_g);
					trace_netfs_folio(folio, netfs_folio_trace_redirtied);
			if (!folio_test_dirty(folio)) {
				folio_detach_private(folio);
				trace_netfs_folio(folio, netfs_folio_trace_clear_g);
				trace_netfs_folio(folio, netfs_folio_trace_redirtied);
			trace_netfs_folio(folio, netfs_folio_trace_clear);
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		xas_advance(&xas, folio_next_index(folio) - 1);
		folio_end_writeback(folio);
	netfs_put_group_many(group, gcount);
 * Deal with the disposition of the folios that are under writeback to close
static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq)
	struct address_space *mapping = wreq->mapping;
	switch (wreq->error) {
		netfs_pages_written_back(wreq);
		pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error);
		netfs_redirty_pages(mapping, wreq->start, wreq->len);
		netfs_kill_pages(mapping, wreq->start, wreq->len);
		mapping_set_error(mapping, wreq->error);
	if (wreq->netfs_ops->done)
		wreq->netfs_ops->done(wreq);
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 * If this page holds new content, then we can include filler zeros in the
static void netfs_extend_writeback(struct address_space *mapping,
				   struct netfs_group *group,
				   struct xa_state *xas,
	struct netfs_folio *finfo;
	struct folio_batch fbatch;
	pgoff_t index = (start + *_len) / PAGE_SIZE;
	folio_batch_init(&fbatch);
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		xas_for_each(xas, folio, ULONG_MAX) {
			if (xas_retry(xas, folio))
			if (xa_is_value(folio))
			if (folio->index != index) {
			if (!folio_try_get_rcu(folio)) {
			/* Has the folio moved or been split? */
			if (unlikely(folio != xas_reload(xas))) {
			if (!folio_trylock(folio)) {
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
			len = folio_size(folio);
			priv = folio_get_private(folio);
			if ((const struct netfs_group *)priv != group) {
				finfo = netfs_folio_info(folio);
				if (finfo->netfs_group != group ||
				    finfo->dirty_offset > 0) {
				len = finfo->dirty_len;
			*_top += folio_size(folio);
			index += folio_nr_pages(folio);
			*_count -= folio_nr_pages(folio);
			if (*_len >= max_len || *_count <= 0)
			if (!folio_batch_add(&fbatch, folio))
		/* Now, if we obtained any folios, we can shift them to being
		 * writable and mark them for caching.
		if (!folio_batch_count(&fbatch))
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];
			trace_netfs_folio(folio, netfs_folio_trace_store_plus);
			if (!folio_clear_dirty_for_io(folio))
			folio_start_writeback(folio);
			netfs_folio_start_fscache(caching, folio);
		folio_batch_release(&fbatch);
 * Synchronously write back the locked page and any subsequent non-locked dirty
static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
						  struct writeback_control *wbc,
						  struct netfs_group *group,
						  struct xa_state *xas,
						  unsigned long long start,
						  unsigned long long end)
	struct netfs_io_request *wreq;
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	unsigned long long i_size = i_size_read(&ctx->inode);
	bool caching = netfs_is_cache_enabled(ctx);
	long count = wbc->nr_to_write;
	_enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
	wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
		return PTR_ERR(wreq);
	if (!folio_clear_dirty_for_io(folio))
	folio_start_writeback(folio);
	netfs_folio_start_fscache(caching, folio);
	count -= folio_nr_pages(folio);
	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	trace_netfs_folio(folio, netfs_folio_trace_store);
	finfo = netfs_folio_info(folio);
		start += finfo->dirty_offset;
		if (finfo->dirty_offset + finfo->dirty_len != len) {
			len = finfo->dirty_len;
		len = finfo->dirty_len;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored. Also
		 * put an upper limit on the size of a single storedata op.
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);
		netfs_extend_writeback(mapping, group, xas, &count, start,
				       max_len, caching, &len, &wreq->upper_len);
	len = min_t(unsigned long long, len, i_size - start);
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	if (start < i_size) {
		_debug("write back %zx @%llx [%llx]", len, start, i_size);
		/* Speculatively write to the cache. We have to fix this up
		 * later if the store fails.
		wreq->cleanup = netfs_cleanup_buffered_write;
		iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
		__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
		ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
		if (ret == 0 || ret == -EIOCBQUEUED)
			wbc->nr_to_write -= len / PAGE_SIZE;
		_debug("write discard %zx @%llx [%llx]", len, start, i_size);
		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(mapping, start, len, caching);
		netfs_pages_written_back(wreq);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
 * Write a region of pages back to the server
static ssize_t netfs_writepages_begin(struct address_space *mapping,
				      struct writeback_control *wbc,
				      struct netfs_group *group,
				      struct xa_state *xas,
				      unsigned long long *_start,
				      unsigned long long end)
	const struct netfs_folio *finfo;
	struct folio *folio;
	unsigned long long start = *_start;
	_enter("%llx,%llx,", start, end);
	/* Find the first dirty page in the group. */
		folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
		if (xas_retry(xas, folio) || xa_is_value(folio))
		if (!folio_try_get_rcu(folio)) {
		if (unlikely(folio != xas_reload(xas))) {
		/* Skip any dirty folio that's not in the group of interest. */
		priv = folio_get_private(folio);
		if ((const struct netfs_group *)priv != group) {
			finfo = netfs_folio_info(folio);
			if (finfo->netfs_group != group) {
	start = folio_pos(folio); /* May regress with THPs */
	_debug("wback %lx", folio->index);
	/* At this point we hold neither the i_pages lock nor the page lock:
	 * the page may be truncated or invalidated (changing page->mapping to
	 * NULL), or even swizzled back from swapper_space to tmpfs file
	if (wbc->sync_mode != WB_SYNC_NONE) {
		ret = folio_lock_killable(folio);
		if (!folio_trylock(folio))
	if (folio->mapping != mapping ||
	    !folio_test_dirty(folio)) {
		start += folio_size(folio);
		folio_unlock(folio);
	if (folio_test_writeback(folio) ||
	    folio_test_fscache(folio)) {
		folio_unlock(folio);
		if (wbc->sync_mode != WB_SYNC_NONE) {
			folio_wait_writeback(folio);
#ifdef CONFIG_FSCACHE
			folio_wait_fscache(folio);
		start += folio_size(folio);
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (skips >= 5 || need_resched()) {
	ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas,
	*_start = start + ret;
	_leave(" = %zd [%llx]", ret, *_start);
 * Write a region of pages back to the server
static int netfs_writepages_region(struct address_space *mapping,
				   struct writeback_control *wbc,
				   struct netfs_group *group,
				   unsigned long long *_start,
				   unsigned long long end)
	XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
		ret = netfs_writepages_begin(mapping, wbc, group, &xas,
		if (ret > 0 && wbc->nr_to_write > 0)
	} while (ret > 0 && wbc->nr_to_write > 0);
	return ret > 0 ? 0 : ret;
 * write some of the pending data back to the server
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
	struct netfs_group *group = NULL;
	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	if (wbc->range_cyclic && mapping->writeback_index) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = netfs_writepages_region(mapping, wbc, group,
		if (wbc->nr_to_write <= 0) {
			mapping->writeback_index = start / PAGE_SIZE;
		end = mapping->writeback_index * PAGE_SIZE;
		mapping->writeback_index = 0;
		ret = netfs_writepages_region(mapping, wbc, group, &start, end);
			mapping->writeback_index = start / PAGE_SIZE;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = netfs_writepages_region(mapping, wbc, group,
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
		start = wbc->range_start;
		ret = netfs_writepages_region(mapping, wbc, group,
					      &start, wbc->range_end);
	_leave(" = %d", ret);
EXPORT_SYMBOL(netfs_writepages);
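
/* This is normally wired up as the ->writepages method in the filesystem's
 * address_space_operations.  A minimal sketch (the "myfs" name is
 * illustrative and not part of netfslib):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.writepages	= netfs_writepages,
 *		...
 *	};
 */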
 * Deal with the disposition of a laundered folio.
static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
		pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
	mapping_set_error(wreq->mapping, wreq->error);
 * netfs_launder_folio - Clean up a dirty folio that's being invalidated
 * @folio: The folio to clean
 * This is called to write back a folio that's being invalidated when an inode
 * is getting torn down. Ideally, writepages would be used instead.
int netfs_launder_folio(struct folio *folio)
	struct netfs_io_request *wreq;
	struct address_space *mapping = folio->mapping;
	struct netfs_folio *finfo = netfs_folio_info(folio);
	struct netfs_group *group = netfs_folio_group(folio);
	struct bio_vec bvec;
	unsigned long long i_size = i_size_read(mapping->host);
	unsigned long long start = folio_pos(folio);
	size_t offset = 0, len;
		offset = finfo->dirty_offset;
		len = finfo->dirty_len;
		len = folio_size(folio);
	len = min_t(unsigned long long, len, i_size - start);
	wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
		ret = PTR_ERR(wreq);
	if (!folio_clear_dirty_for_io(folio))
	trace_netfs_folio(folio, netfs_folio_trace_launder);
	_debug("launder %llx-%llx", start, start + len - 1);
	/* Speculatively write to the cache. We have to fix this up later if
	wreq->cleanup = netfs_cleanup_launder_folio;
	bvec_set_folio(&bvec, folio, len, offset);
	iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
	folio_detach_private(folio);
	netfs_put_group(group);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	folio_wait_fscache(folio);
	_leave(" = %d", ret);
EXPORT_SYMBOL(netfs_launder_folio);
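
/* Likewise, a filesystem that wants laundering handled by netfslib points the
 * ->launder_folio method of its address_space_operations at the helper above,
 * e.g. (illustrative only):
 *
 *	.launder_folio	= netfs_launder_folio,
 */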