// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level buffered write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * How the folio is to be modified, as determined below.  Adjust
 * netfs_folio_traces if this is changed.
 */
enum netfs_how_to_modify {
	NETFS_FOLIO_IS_UPTODATE,	/* Folio is uptodate already */
	NETFS_JUST_PREFETCH,		/* We have to read the folio anyway */
	NETFS_WHOLE_FOLIO_MODIFY,	/* We're going to overwrite the whole folio */
	NETFS_MODIFY_AND_CLEAR,		/* We can assume there is no data to be downloaded. */
	NETFS_STREAMING_WRITE,		/* Store incomplete data in non-uptodate page. */
	NETFS_STREAMING_WRITE_CONT,	/* Continue streaming write. */
	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
};

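/*
 * Attach the given dirty-page group to a folio, taking a ref on the group, if
 * the folio has no group yet or only carries a copy-to-cache mark; if no
 * group is given, clear a bare copy-to-cache mark.
 */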
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	void *priv = folio_get_private(folio);

	if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
		folio_attach_private(folio, netfs_get_group(netfs_group));
	else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
		folio_detach_private(folio);
}

/*
 * Decide how we should modify a folio.  We might be attempting to do
 * write-streaming, in which case we don't want to do a local RMW cycle if we
 * can avoid it.  If we're doing local caching or content crypto, we award
 * that priority over avoiding RMW.  If the file is open readably, then we
 * also assume that we may want to read what we wrote.
 */
static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
						    struct file *file,
						    struct folio *folio,
						    void *netfs_group,
						    size_t flen,
						    size_t offset,
						    size_t len,
						    bool maybe_trouble)
{
	struct netfs_folio *finfo = netfs_folio_info(folio);
	struct netfs_group *group = netfs_folio_group(folio);
	loff_t pos = folio_file_pos(folio);

	_enter("");

	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
		return NETFS_FLUSH_CONTENT;

	if (folio_test_uptodate(folio))
		return NETFS_FOLIO_IS_UPTODATE;

	if (pos >= ctx->zero_point)
		return NETFS_MODIFY_AND_CLEAR;

	if (!maybe_trouble && offset == 0 && len >= flen)
		return NETFS_WHOLE_FOLIO_MODIFY;

	if (file->f_mode & FMODE_READ)
		goto no_write_streaming;

	if (netfs_is_cache_enabled(ctx)) {
		/* We don't want to get a streaming write on a file that loses
		 * caching service temporarily because the backing store got
		 * culled.
		 */
		goto no_write_streaming;
	}

	if (!finfo)
		return NETFS_STREAMING_WRITE;

	/* We can continue a streaming write only if it continues on from the
	 * previous.  If it overlaps, we must flush lest we suffer a partial
	 * copy and disjoint dirty regions.
	 */
	if (offset == finfo->dirty_offset + finfo->dirty_len)
		return NETFS_STREAMING_WRITE_CONT;
	return NETFS_FLUSH_CONTENT;

no_write_streaming:
	if (finfo) {
		netfs_stat(&netfs_n_wh_wstream_conflict);
		return NETFS_FLUSH_CONTENT;
	}
	return NETFS_JUST_PREFETCH;
}

/*
 * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

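	/* Ask for a folio order big enough to cover the offset within the
	 * first page plus the length: e.g. with 4KiB pages, a 64KiB part at a
	 * page-aligned pos requests an order-4 (64KiB) folio, and the
	 * allocator falls back to smaller orders if that can't be had.
	 */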
	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}

/*
 * Update i_size and estimate the update to i_blocks to reflect the additional
 * data written into the pagecache until we can find out from the server what
 * the values actually are.
 */
static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
				loff_t i_size, loff_t pos, size_t copied)
{
	blkcnt_t add;
	size_t gap;

	if (ctx->ops->update_i_size) {
		ctx->ops->update_i_size(inode, pos);
		return;
	}

	i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
	fscache_update_cookie(ctx->cache, NULL, &pos);
#endif

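	/* Estimate the number of new blocks.  E.g. with 512-byte sectors,
	 * appending 3000 bytes to a file of i_size 1000 leaves a gap of 24
	 * bytes in the final sector, so add = DIV_ROUND_UP(2976, 512) = 6,
	 * clamped to the 8 sectors that cover the new EOF at 4000.
	 */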
	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
	if (copied > gap) {
		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);

		inode->i_blocks = min_t(blkcnt_t,
					DIV_ROUND_UP(pos, SECTOR_SIZE),
					inode->i_blocks + add);
	}
}

/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * Copy data into pagecache pages attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty pages are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified.  Dirty pages may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_sync	= true,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	};
	struct netfs_io_request *wreq = NULL;
	struct netfs_folio *finfo;
	struct folio *folio, *writethrough = NULL;
	enum netfs_how_to_modify howto;
	enum netfs_folio_trace trace;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0 : BDP_ASYNC;
	ssize_t written = 0, ret, ret2;
	loff_t i_size, pos = iocb->ki_pos, from, to;
	size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	bool maybe_trouble = false;

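	/* If the inode is marked for writethrough or this is an
	 * O_SYNC/O_DSYNC write, flush and wait for any overlapping dirty data
	 * first, then set up a writethrough request so that copied data is
	 * pushed to the server as we go rather than just being left dirty in
	 * the pagecache.
	 */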
	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
	    ) {
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);

		ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		if (ret < 0) {
			wbc_detach_inode(&wbc);
			goto out;
		}

		wreq = netfs_begin_writethrough(iocb, iter->count);
		if (IS_ERR(wreq)) {
			wbc_detach_inode(&wbc);
			ret = PTR_ERR(wreq);
			wreq = NULL;
			goto out;
		}
		if (!is_sync_kiocb(iocb))
			wreq->iocb = iocb;
		netfs_stat(&netfs_n_wh_writethrough);
	} else {
		netfs_stat(&netfs_n_wh_buffered_write);
	}

	do {
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */

		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		ret = -EFAULT;
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		flen = folio_size(folio);
		offset = pos & (flen - 1);
		part = min_t(size_t, flen - offset, part);

		/* Wait for writeback to complete.  The writeback engine owns
		 * the info in folio->private and may change it until it
		 * removes the WB mark.
		 */
		if (folio_get_private(folio) &&
		    folio_wait_writeback_killable(folio)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* See if we need to prefetch the area we're going to modify.
		 * We need to do this before we get a lock on the folio in case
		 * there's more than one writer competing for the same cache
		 * block.
		 */
		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
					    flen, offset, part, maybe_trouble);
		_debug("howto %u", howto);
		switch (howto) {
		case NETFS_JUST_PREFETCH:
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			break;
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_WHOLE_FOLIO_MODIFY:
		case NETFS_STREAMING_WRITE_CONT:
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, 0, offset);
			break;
		case NETFS_STREAMING_WRITE:
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			break;
		case NETFS_FLUSH_CONTENT:
			trace_netfs_folio(folio, netfs_flush_content);
			from = folio_pos(folio);
			to = from + folio_size(folio) - 1;
			folio_unlock(folio);
			folio_put(folio);
			ret = filemap_write_and_wait_range(mapping, from, to);
			if (ret < 0)
				goto out;	/* Folio already unlocked and put */
			continue;
		}

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, part, iter);

		flush_dcache_folio(folio);

		/* Deal with a (partially) failed copy */
		if (copied == 0) {
			ret = -EFAULT;
			goto error_folio_unlock;
		}

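		/* netfs_folio_traces is kept in step with enum
		 * netfs_how_to_modify (see the comment on that enum), so the
		 * howto value can be used directly as a trace code.
		 */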
		trace = (enum netfs_folio_trace)howto;
		switch (howto) {
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_JUST_PREFETCH:
			netfs_set_group(folio, netfs_group);
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, offset + copied, flen);
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_WHOLE_FOLIO_MODIFY:
			if (unlikely(copied < part)) {
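				/* The usercopy faulted partway through a
				 * folio we expected to overwrite in full;
				 * discard the partial copy, refault the
				 * source and go round again.  Setting
				 * maybe_trouble means the retry won't assume
				 * a whole-folio overwrite a second time.
				 */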
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				folio_unlock(folio);
				goto retry;
			}
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_STREAMING_WRITE:
			if (offset == 0 && copied == flen) {
				netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace = netfs_streaming_filled_page;
				break;
			}
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			break;
		case NETFS_STREAMING_WRITE_CONT:
			finfo = netfs_folio_info(folio);
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace = netfs_streaming_cont_filled_page;
			}
			break;
		default:
			WARN(true, "Unexpected modify type %u ix=%lx\n",
			     howto, folio->index);
			ret = -EIO;
			goto error_folio_unlock;
		}

		trace_netfs_folio(folio, trace);

		/* Update the inode size if we moved the EOF marker */
		pos += copied;
		i_size = i_size_read(inode);
		if (pos > i_size)
			netfs_update_i_size(ctx, inode, i_size, pos, copied);
		written += copied;

		if (likely(!wreq)) {
			folio_mark_dirty(folio);
			folio_unlock(folio);
		} else {
			netfs_advance_writethrough(wreq, &wbc, folio, copied,
						   offset + copied == flen,
						   &writethrough);
			/* Folio unlocked */
		}
retry:
		folio_put(folio);
		folio = NULL;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (likely(written) && ctx->ops->post_modify)
		ctx->ops->post_modify(inode);

	if (unlikely(wreq)) {
		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
		wbc_detach_inode(&wbc);
		if (ret2 == -EIOCBQUEUED)
			return ret2;
		if (ret == 0)
			ret = ret2;
	}

	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);

/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file.  It does all basic checks, removes SUID from the file, updates
 * modification times and then hands off to netfs_perform_write() to do the
 * buffered write.
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already.  The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it.  This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);
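
/*
 * Sketch of a caller that does its own locking and supplies a dirty-page
 * group (the "myfs" names are hypothetical):
 *
 *	inode_lock(inode);
 *	ret = generic_write_checks(iocb, from);
 *	if (ret > 0)
 *		ret = netfs_buffered_write_iter_locked(iocb, from,
 *						       myfs_group);
 *	inode_unlock(inode);
 */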

/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or if
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);
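
/*
 * A network filesystem would typically use this directly as its ->write_iter
 * handler, e.g. (sketch; "myfs" is hypothetical):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.read_iter	= netfs_file_read_iter,
 *		.write_iter	= netfs_file_write_iter,
 *		...
 *	};
 */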

/*
 * Notification that a previously read-only page is about to become writable.
 * Note that the caller indicates a single page of a multipage folio.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct netfs_group *group;
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct netfs_inode *ictx = netfs_inode(inode);
	vm_fault_t ret = VM_FAULT_RETRY;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_lock_killable(folio) < 0)
		goto out;

	if (folio_wait_writeback_killable(folio)) {
		ret = VM_FAULT_LOCKED;
		goto out;
	}

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
		goto out;
	}

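	/* If the folio is dirty for a different group, unlock it and wait for
	 * in-flight writes on that range to complete so that the fault can be
	 * retried with the folio in a compatible state.
	 */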
	group = netfs_folio_group(folio);
	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
		folio_unlock(folio);
		err = filemap_fdatawait_range(inode->i_mapping,
					      folio_pos(folio),
					      folio_pos(folio) + folio_size(folio));
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	if (ictx->ops->post_modify)
		ictx->ops->post_modify(inode);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(netfs_page_mkwrite);
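
/*
 * A filesystem would typically call this from its ->page_mkwrite handler,
 * e.g. (sketch; "myfs" is hypothetical):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return netfs_page_mkwrite(vmf, NULL);
 *	}
 *
 *	static const struct vm_operations_struct myfs_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */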