// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * Determined write method. Adjust netfs_folio_traces if this is changed.
 */
enum netfs_how_to_modify {
	NETFS_FOLIO_IS_UPTODATE,	/* Folio is uptodate already */
	NETFS_JUST_PREFETCH,		/* We have to read the folio anyway */
	NETFS_WHOLE_FOLIO_MODIFY,	/* We're going to overwrite the whole folio */
	NETFS_MODIFY_AND_CLEAR,		/* We can assume there is no data to be downloaded. */
	NETFS_STREAMING_WRITE,		/* Store incomplete data in non-uptodate page. */
	NETFS_STREAMING_WRITE_CONT,	/* Continue streaming write. */
	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
};

static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	if (netfs_group && !folio_get_private(folio))
		folio_attach_private(folio, netfs_get_group(netfs_group));
}

#if IS_ENABLED(CONFIG_FSCACHE)
static void netfs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void netfs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif

/*
 * Decide how we should modify a folio. We might be attempting to do
 * write-streaming, in which case we don't want to do a local RMW cycle if we
 * can avoid it. If we're doing local caching or content crypto, we award that
 * priority over avoiding RMW. If the file is open readably, then we also
 * assume that we may want to read what we wrote.
 */
static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
						    struct file *file,
						    struct folio *folio,
						    void *netfs_group,
						    size_t flen,
						    size_t offset,
						    size_t len,
						    bool maybe_trouble)
{
	struct netfs_folio *finfo = netfs_folio_info(folio);
	loff_t pos = folio_file_pos(folio);

	_enter("");

	if (netfs_folio_group(folio) != netfs_group)
		return NETFS_FLUSH_CONTENT;

	if (folio_test_uptodate(folio))
		return NETFS_FOLIO_IS_UPTODATE;

	if (pos >= ctx->remote_i_size)
		return NETFS_MODIFY_AND_CLEAR;

	if (!maybe_trouble && offset == 0 && len >= flen)
		return NETFS_WHOLE_FOLIO_MODIFY;

	if (file->f_mode & FMODE_READ)
		return NETFS_JUST_PREFETCH;

	if (netfs_is_cache_enabled(ctx))
		return NETFS_JUST_PREFETCH;

	if (!finfo)
		return NETFS_STREAMING_WRITE;

	/* We can continue a streaming write only if it continues on from the
	 * previous. If it overlaps, we must flush lest we suffer a partial
	 * copy and disjoint dirty regions.
	 */
	if (offset == finfo->dirty_offset + finfo->dirty_len)
		return NETFS_STREAMING_WRITE_CONT;
	return NETFS_FLUSH_CONTENT;
}
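
/*
 * For illustration (worked example, not exercised anywhere in this file): a
 * 100-byte write into a folio that is not yet uptodate resolves as follows.
 * If the folio sits at or beyond remote_i_size there is nothing to download,
 * so the answer is NETFS_MODIFY_AND_CLEAR and the rest of the folio is simply
 * zeroed. The same write landing mid-file through an O_WRONLY descriptor on
 * an uncached inode falls through the earlier checks to NETFS_STREAMING_WRITE,
 * and a follow-up write starting exactly where the first one ended gets
 * NETFS_STREAMING_WRITE_CONT; anything overlapping or discontiguous forces
 * NETFS_FLUSH_CONTENT instead.
 */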

/*
 * Grab a folio for writing and lock it. Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}
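
/*
 * For illustration (sketch under assumptions, not from this file): the order
 * hint is derived from the byte span the caller wants covered, so with 4KiB
 * pages a 0x19000-byte write starting 0x800 into a page passes 0x19800 to
 * fgf_set_order(), which encodes a preferred order of roughly
 * ilog2(0x19800 >> PAGE_SHIFT). __filemap_get_folio() treats this purely as a
 * hint and may return a smaller folio, which is why netfs_perform_write()
 * below re-derives flen, offset and part from the folio it actually gets back.
 */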

/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * Copy data into pagecache pages attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty pages are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified. Dirty pages may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_folio *finfo;
	struct folio *folio;
	enum netfs_how_to_modify howto;
	enum netfs_folio_trace trace;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
	ssize_t written = 0, ret;
	loff_t i_size, pos = iocb->ki_pos, from, to;
	size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	bool maybe_trouble = false;

	do {
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */

		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		ret = -EFAULT;
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		ret = -ENOMEM;
		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (!folio)
			break;

		flen = folio_size(folio);
		offset = pos & (flen - 1);
		part = min_t(size_t, flen - offset, part);

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* See if we need to prefetch the area we're going to modify.
		 * We need to do this before we get a lock on the folio in case
		 * there's more than one writer competing for the same cache
		 * block.
		 */
		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
					    flen, offset, part, maybe_trouble);
		_debug("howto %u", howto);
		switch (howto) {
		case NETFS_JUST_PREFETCH:
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			break;
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_WHOLE_FOLIO_MODIFY:
		case NETFS_STREAMING_WRITE_CONT:
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, 0, offset);
			break;
		case NETFS_STREAMING_WRITE:
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			break;
		case NETFS_FLUSH_CONTENT:
			trace_netfs_folio(folio, netfs_flush_content);
			from = folio_pos(folio);
			to = from + folio_size(folio) - 1;
			folio_unlock(folio);
			folio_put(folio);
			ret = filemap_write_and_wait_range(mapping, from, to);
			if (ret < 0)
				goto error_folio_unlock;
			continue;
		}

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, part, iter);

		flush_dcache_folio(folio);

		/* Deal with a (partially) failed copy */
		if (copied == 0) {
			ret = -EFAULT;
			goto error_folio_unlock;
		}

		trace = (enum netfs_folio_trace)howto;
		switch (howto) {
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_JUST_PREFETCH:
			netfs_set_group(folio, netfs_group);
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, offset + copied, flen);
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_WHOLE_FOLIO_MODIFY:
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				goto retry;
			}
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_STREAMING_WRITE:
			if (offset == 0 && copied == flen) {
				netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace = netfs_streaming_filled_page;
				break;
			}
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			break;
		case NETFS_STREAMING_WRITE_CONT:
			finfo = netfs_folio_info(folio);
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace = netfs_streaming_cont_filled_page;
			}
			break;
		default:
			WARN(true, "Unexpected modify type %u ix=%lx\n",
			     howto, folio_index(folio));
			ret = -EIO;
			goto error_folio_unlock;
		}

		trace_netfs_folio(folio, trace);

		/* Update the inode size if we moved the EOF marker */
		i_size = i_size_read(inode);
		pos += copied;
		if (pos > i_size) {
			if (ctx->ops->update_i_size) {
				ctx->ops->update_i_size(inode, pos);
			} else {
				i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
				fscache_update_cookie(ctx->cache, NULL, &pos);
#endif
			}
		}
		written += copied;

		folio_mark_dirty(folio);
retry:
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (likely(written)) {
		/* Flush and wait for a write that requires immediate synchronisation. */
		if (iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) {
			_debug("dsync");
			ret = filemap_fdatawait_range(mapping, iocb->ki_pos,
						      iocb->ki_pos + written);
		}

		iocb->ki_pos += written;
	}

	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);
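
/*
 * Illustrative sketch of a caller (hypothetical "myfs", not provided by this
 * file): a filesystem that manages its own locking and wants to tag dirty
 * folios with a grouping object would wrap netfs_perform_write() roughly like
 * this, where myfs_current_write_group() stands in for however the filesystem
 * obtains its netfs_group (for ceph that would be the current snap context):
 *
 *	static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		struct netfs_group *group = myfs_current_write_group(inode);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = netfs_perform_write(iocb, from, group);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */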

/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already. The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);

/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);
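
/*
 * Illustrative sketch of wiring (hypothetical "myfs"): a filesystem with no
 * special locking needs can point its file_operations straight at the helper
 * above. netfs_file_read_iter() is the buffered-read counterpart exported by
 * the read side of this library; myfs_file_mmap() and myfs_fsync() stand in
 * for the filesystem's own hooks (see the vm_operations sketch after
 * netfs_page_mkwrite() below for what the mmap side typically installs):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= netfs_file_read_iter,
 *		.write_iter	= netfs_file_write_iter,
 *		.mmap		= myfs_file_mmap,
 *		.fsync		= myfs_fsync,
 *	};
 */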

/*
 * Notification that a previously read-only page is about to become writable.
 * Note that the caller indicates a single page of a multipage folio.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	vm_fault_t ret = VM_FAULT_RETRY;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
		goto out;
	}

	if (netfs_folio_group(folio) != netfs_group) {
		folio_unlock(folio);
		err = filemap_fdatawait_range(inode->i_mapping,
					      folio_pos(folio),
					      folio_pos(folio) + folio_size(folio));
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(netfs_page_mkwrite);
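
/*
 * Illustrative sketch (hypothetical "myfs"): the helper above is meant to be
 * wrapped by the filesystem's vm_operations_struct so that the right dirty-
 * page group gets passed in; a filesystem with no grouping requirement can
 * simply pass NULL:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *
 *		return netfs_page_mkwrite(vmf, myfs_current_write_group(inode));
 *	}
 *
 *	static const struct vm_operations_struct myfs_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */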

/*
 * Kill all the pages in the given range
 */
static void netfs_kill_pages(struct address_space *mapping,
			     loff_t start, loff_t len)
{
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("%llx-%llx", start, start + len - 1);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (IS_ERR(folio)) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		trace_netfs_folio(folio, netfs_folio_trace_kill);
		folio_clear_uptodate(folio);
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void netfs_redirty_pages(struct address_space *mapping,
				loff_t start, loff_t len)
{
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("%llx-%llx", start, start + len - 1);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (IS_ERR(folio)) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);
		trace_netfs_folio(folio, netfs_folio_trace_redirty);
		filemap_dirty_folio(mapping, folio);
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	balance_dirty_pages_ratelimited(mapping);

	_leave("");
}

/*
 * Completion of write to server
 */
static void netfs_pages_written_back(struct netfs_io_request *wreq)
{
	struct address_space *mapping = wreq->mapping;
	struct netfs_folio *finfo;
	struct netfs_group *group = NULL;
	struct folio *folio;
	pgoff_t last;
	int gcount = 0;

	XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE);

	_enter("%llx-%llx", wreq->start, wreq->start + wreq->len);

	rcu_read_lock();

	last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, last) {
		WARN(!folio_test_writeback(folio),
		     "bad %zx @%llx page %lx %lx\n",
		     wreq->len, wreq->start, folio_index(folio), last);

		if ((finfo = netfs_folio_info(folio))) {
			/* Streaming writes cannot be redirtied whilst under
			 * writeback, so discard the streaming record.
			 */
			folio_detach_private(folio);
			group = finfo->netfs_group;
			gcount++;
			trace_netfs_folio(folio, netfs_folio_trace_clear_s);
			kfree(finfo);
		} else if ((group = netfs_folio_group(folio))) {
			/* Need to detach the group pointer if the page didn't
			 * get redirtied. If it has been redirtied, then it
			 * must be within the same group.
			 */
			if (folio_test_dirty(folio)) {
				trace_netfs_folio(folio, netfs_folio_trace_redirtied);
				goto end_wb;
			}
			if (folio_trylock(folio)) {
				if (!folio_test_dirty(folio)) {
					folio_detach_private(folio);
					gcount++;
					trace_netfs_folio(folio, netfs_folio_trace_clear_g);
				} else {
					trace_netfs_folio(folio, netfs_folio_trace_redirtied);
				}
				folio_unlock(folio);
				goto end_wb;
			}

			xas_pause(&xas);
			rcu_read_unlock();
			folio_lock(folio);
			if (!folio_test_dirty(folio)) {
				folio_detach_private(folio);
				gcount++;
				trace_netfs_folio(folio, netfs_folio_trace_clear_g);
			} else {
				trace_netfs_folio(folio, netfs_folio_trace_redirtied);
			}
			folio_unlock(folio);
			rcu_read_lock();
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_clear);
		}
	end_wb:
		if (folio_test_fscache(folio))
			folio_end_fscache(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();
	netfs_put_group_many(group, gcount);
	_leave("");
}

/*
 * Deal with the disposition of the folios that are under writeback to close
 * out the operation.
 */
static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq)
{
	struct address_space *mapping = wreq->mapping;

	_enter("");

	switch (wreq->error) {
	case 0:
		netfs_pages_written_back(wreq);
		break;

	default:
		pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
	case -ENETRESET:
	case -EDQUOT:
	case -ENOSPC:
		netfs_redirty_pages(mapping, wreq->start, wreq->len);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		netfs_kill_pages(mapping, wreq->start, wreq->len);
		break;
	}

	if (wreq->error)
		mapping_set_error(mapping, wreq->error);
	if (wreq->netfs_ops->done)
		wreq->netfs_ops->done(wreq);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void netfs_extend_writeback(struct address_space *mapping,
				   struct netfs_group *group,
				   struct xa_state *xas,
				   long *_count,
				   loff_t start,
				   loff_t max_len,
				   bool caching,
				   size_t *_len,
				   size_t *_top)
{
	struct netfs_folio *finfo;
	struct folio_batch fbatch;
	struct folio *folio;
	unsigned int i;
	pgoff_t index = (start + *_len) / PAGE_SIZE;
	size_t len;
	void *priv;
	bool stop = true;

	folio_batch_init(&fbatch);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index) {
				xas_reset(xas);
				break;
			}

			if (!folio_try_get_rcu(folio)) {
				xas_reset(xas);
				continue;
			}

			/* Has the folio moved or been split? */
			if (unlikely(folio != xas_reload(xas))) {
				folio_put(folio);
				xas_reset(xas);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				xas_reset(xas);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				xas_reset(xas);
				break;
			}

			stop = false;
			len = folio_size(folio);
			priv = folio_get_private(folio);
			if ((const struct netfs_group *)priv != group) {
				stop = true;
				finfo = netfs_folio_info(folio);
				if (finfo->netfs_group != group ||
				    finfo->dirty_offset > 0) {
					folio_unlock(folio);
					folio_put(folio);
					xas_reset(xas);
					break;
				}
				len = finfo->dirty_len;
			}

			*_top += folio_size(folio);
			index += folio_nr_pages(folio);
			*_count -= folio_nr_pages(folio);
			*_len += len;
			if (*_len >= max_len || *_count <= 0)
				stop = true;

			if (!folio_batch_add(&fbatch, folio))
				break;
			if (stop)
				break;
		}

		xas_pause(xas);
		rcu_read_unlock();

		/* Now, if we obtained any folios, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!folio_batch_count(&fbatch))
			break;

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];
			trace_netfs_folio(folio, netfs_folio_trace_store_plus);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			folio_start_writeback(folio);
			netfs_folio_start_fscache(caching, folio);
			folio_unlock(folio);
		}

		folio_batch_release(&fbatch);
		cond_resched();
	} while (!stop);
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
						   struct writeback_control *wbc,
						   struct netfs_group *group,
						   struct xa_state *xas,
						   struct folio *folio,
						   unsigned long long start,
						   unsigned long long end)
{
	struct netfs_io_request *wreq;
	struct netfs_folio *finfo;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	unsigned long long i_size = i_size_read(&ctx->inode);
	size_t len, max_len;
	bool caching = netfs_is_cache_enabled(ctx);
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx,%u", folio_index(folio), start, end, caching);

	wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
				   NETFS_WRITEBACK);
	if (IS_ERR(wreq)) {
		folio_unlock(folio);
		return PTR_ERR(wreq);
	}

	if (!folio_clear_dirty_for_io(folio))
		BUG();
	folio_start_writeback(folio);
	netfs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	trace_netfs_folio(folio, netfs_folio_trace_store);

	len = wreq->len;
	finfo = netfs_folio_info(folio);
	if (finfo) {
		start += finfo->dirty_offset;
		if (finfo->dirty_offset + finfo->dirty_len != len) {
			len = finfo->dirty_len;
			goto cant_expand;
		}
		len = finfo->dirty_len;
	}

	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored. Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len)
			netfs_extend_writeback(mapping, group, xas, &count, start,
					       max_len, caching, &len, &wreq->upper_len);
	}

cant_expand:
	len = min_t(unsigned long long, len, i_size - start);

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);
	wreq->start = start;
	wreq->len = len;

	if (start < i_size) {
		_debug("write back %zx @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache. We have to fix this up
		 * later if the store fails.
		 */
		wreq->cleanup = netfs_cleanup_buffered_write;

		iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
				wreq->upper_len);
		__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
		ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
		if (ret == 0 || ret == -EIOCBQUEUED)
			wbc->nr_to_write -= len / PAGE_SIZE;
	} else {
		_debug("write discard %zx @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(mapping, start, len, caching);
		netfs_pages_written_back(wreq);
		ret = 0;
	}

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = 1");
	return 1;
}

/*
 * Write a region of pages back to the server
 */
static ssize_t netfs_writepages_begin(struct address_space *mapping,
				      struct writeback_control *wbc,
				      struct netfs_group *group,
				      struct xa_state *xas,
				      unsigned long long *_start,
				      unsigned long long end)
{
	const struct netfs_folio *finfo;
	struct folio *folio;
	unsigned long long start = *_start;
	ssize_t ret;
	void *priv;
	int skips = 0;

	_enter("%llx,%llx,", start, end);

search_again:
	/* Find the first dirty page in the group. */
	rcu_read_lock();

	for (;;) {
		folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
		if (xas_retry(xas, folio) || xa_is_value(folio))
			continue;
		if (!folio)
			break;

		if (!folio_try_get_rcu(folio)) {
			xas_reset(xas);
			continue;
		}

		if (unlikely(folio != xas_reload(xas))) {
			folio_put(folio);
			xas_reset(xas);
			continue;
		}

		/* Skip any dirty folio that's not in the group of interest. */
		priv = folio_get_private(folio);
		if ((const struct netfs_group *)priv != group) {
			finfo = netfs_folio_info(folio);
			if (finfo->netfs_group != group) {
				folio_put(folio);
				continue;
			}
		}

		xas_pause(xas);
		break;
	}
	rcu_read_unlock();
	if (!folio)
		return 0;

	start = folio_pos(folio); /* May regress with THPs */

	_debug("wback %lx", folio_index(folio));

	/* At this point we hold neither the i_pages lock nor the page lock:
	 * the page may be truncated or invalidated (changing page->mapping to
	 * NULL), or even swizzled back from swapper_space to tmpfs file
	 * mapping
	 */
lock_again:
	if (wbc->sync_mode != WB_SYNC_NONE) {
		ret = folio_lock_killable(folio);
		if (ret < 0)
			return ret;
	} else {
		if (!folio_trylock(folio))
			goto search_again;
	}

	if (folio->mapping != mapping ||
	    !folio_test_dirty(folio)) {
		start += folio_size(folio);
		folio_unlock(folio);
		goto search_again;
	}

	if (folio_test_writeback(folio) ||
	    folio_test_fscache(folio)) {
		folio_unlock(folio);
		if (wbc->sync_mode != WB_SYNC_NONE) {
			folio_wait_writeback(folio);
#ifdef CONFIG_NETFS_FSCACHE
			folio_wait_fscache(folio);
#endif
			goto lock_again;
		}

		start += folio_size(folio);
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (skips >= 5 || need_resched()) {
				ret = 0;
				goto out;
			}
			skips++;
		}
		goto search_again;
	}

	ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas,
						 folio, start, end);
out:
	if (ret > 0)
		*_start = start + ret;
	_leave(" = %zd [%llx]", ret, *_start);
	return ret;
}

/*
 * Write a region of pages back to the server
 */
static int netfs_writepages_region(struct address_space *mapping,
				   struct writeback_control *wbc,
				   struct netfs_group *group,
				   unsigned long long *_start,
				   unsigned long long end)
{
	ssize_t ret;

	XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);

	do {
		ret = netfs_writepages_begin(mapping, wbc, group, &xas,
					     _start, end);
		if (ret > 0 && wbc->nr_to_write > 0)
			cond_resched();
	} while (ret > 0 && wbc->nr_to_write > 0);

	return ret > 0 ? 0 : ret;
}

/*
 * write some of the pending data back to the server
 */
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct netfs_group *group = NULL;
	loff_t start, end;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */

	if (wbc->range_cyclic && mapping->writeback_index) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = netfs_writepages_region(mapping, wbc, group,
					      &start, LLONG_MAX);
		if (ret < 0)
			goto out;

		if (wbc->nr_to_write <= 0) {
			mapping->writeback_index = start / PAGE_SIZE;
			goto out;
		}

		start = 0;
		end = mapping->writeback_index * PAGE_SIZE;
		mapping->writeback_index = 0;
		ret = netfs_writepages_region(mapping, wbc, group, &start, end);
		if (ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		start = 0;
		ret = netfs_writepages_region(mapping, wbc, group,
					      &start, LLONG_MAX);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = start / PAGE_SIZE;
	} else {
		start = wbc->range_start;
		ret = netfs_writepages_region(mapping, wbc, group,
					      &start, wbc->range_end);
	}

out:
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_writepages);
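
/*
 * Illustrative sketch (hypothetical "myfs"): netfs_writepages() is intended to
 * be plugged into the filesystem's address_space_operations alongside the
 * other netfs helpers, roughly:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.read_folio	= netfs_read_folio,
 *		.readahead	= netfs_readahead,
 *		.writepages	= netfs_writepages,
 *		.dirty_folio	= filemap_dirty_folio,
 *		.release_folio	= netfs_release_folio,
 *		.invalidate_folio = netfs_invalidate_folio,
 *		.launder_folio	= netfs_launder_folio,
 *	};
 *
 * The exact set varies by filesystem; in particular, a filesystem that uses
 * dirty-page groups or fscache may need its own dirty_folio implementation
 * rather than filemap_dirty_folio().
 */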

/*
 * Deal with the disposition of a laundered folio.
 */
static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
{
	if (wreq->error) {
		pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
		mapping_set_error(wreq->mapping, wreq->error);
	}
}

/**
 * netfs_launder_folio - Clean up a dirty folio that's being invalidated
 * @folio: The folio to clean
 *
 * This is called to write back a folio that's being invalidated when an inode
 * is getting torn down. Ideally, writepages would be used instead.
 */
int netfs_launder_folio(struct folio *folio)
{
	struct netfs_io_request *wreq;
	struct address_space *mapping = folio->mapping;
	struct netfs_folio *finfo = netfs_folio_info(folio);
	struct netfs_group *group = netfs_folio_group(folio);
	struct bio_vec bvec;
	unsigned long long i_size = i_size_read(mapping->host);
	unsigned long long start = folio_pos(folio);
	size_t offset = 0, len;
	int ret = 0;

	if (finfo) {
		offset = finfo->dirty_offset;
		start += offset;
		len = finfo->dirty_len;
	} else {
		len = folio_size(folio);
	}
	len = min_t(unsigned long long, len, i_size - start);

	wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
	if (IS_ERR(wreq)) {
		ret = PTR_ERR(wreq);
		goto out;
	}

	if (!folio_clear_dirty_for_io(folio))
		goto out_put;

	trace_netfs_folio(folio, netfs_folio_trace_launder);

	_debug("launder %llx-%llx", start, start + len - 1);

	/* Speculatively write to the cache. We have to fix this up later if
	 * the store fails.
	 */
	wreq->cleanup = netfs_cleanup_launder_folio;

	bvec_set_folio(&bvec, folio, len, offset);
	iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);

out_put:
	folio_detach_private(folio);
	netfs_put_group(group);
	kfree(finfo);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);

out:
	folio_wait_fscache(folio);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_launder_folio);