// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	return fscache_dirty_folio(mapping, folio,
				   afs_vnode_cache(AFS_FS_I(mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif
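
/*
 * Note: the bounds of a folio's dirty region are packed into the single word
 * stashed in folio->private; afs_folio_dirty_from(), afs_folio_dirty_to() and
 * afs_folio_dirty() (see internal.h) encode and decode it.  This is what the
 * merging and writeback logic below consults to find out which part of a
 * folio still needs storing to the server.
 */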

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
				&afs_req_ops, NULL);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

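	/* The folio comes back from netfs_write_begin() locked and with a
	 * reference held; if a conflicting write has to be flushed out below,
	 * it is relocked and the checks are rerun from here.
	 */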
try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		ASSERTCMP(f, <=, t);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = &folio->page;
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = folio_write_one(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

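	/* If the write extended the file, update i_size.  The check is
	 * repeated under vnode->cb_lock so that a larger size set by a racing
	 * writer can't be overwritten with a smaller one.
	 */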
	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

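		/* Writeback was set on this folio by the caller; end it and
		 * then discard the folio entirely, as the data couldn't be
		 * stored.
		 */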
		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->vfs_inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

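	/* Every folio in the range should still be marked for writeback,
	 * which keeps it in the pagecache, so walking the xarray under the
	 * RCU read lock without taking folio refs should be sufficient here.
	 */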
	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter), i_size;
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	i_size = i_size_read(&vnode->vfs_inode);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->vfs_inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

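	/* Issue the StoreData RPC; if it fails with a security error, retry
	 * with the next writeback key cached on the vnode.
	 */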
try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}

/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}

			/* Has the page moved or been split? */
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

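			/* "filler" is the gap between the end of this folio's
			 * dirty region and the end of the folio; it is only
			 * added to the length if the next folio turns out to
			 * be contiguously dirty too.
			 */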
			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();

		/* Now, if we obtained any pages, we can shift them to being
		 * writable and mark them for caching.
		 */
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->vfs_inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}

	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);

		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */
		afs_write_to_cache(vnode, start, len, i_size, caching);

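		/* Point an iterator straight at the dirty data in the
		 * pagecache xarray so that the store doesn't need an
		 * intermediate copy.
		 */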
		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);

		/* The dirty region was entirely beyond the EOF. */
		fscache_clear_page_bits(afs_vnode_cache(vnode),
					mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(subpage);
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", folio_index(folio));

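	/* Wait for any in-flight write to the local cache to complete first,
	 * so that the PG_fscache state can be reused for this writeback.
	 */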
#ifdef CONFIG_AFS_FSCACHE
	folio_wait_fscache(folio);
#endif

	start = folio_index(folio) * PAGE_SIZE;
	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
					       folio, start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;

		folio = page_folio(head_page);
		start = folio_pos(folio); /* May regress with THPs */

		_debug("wback %lx", folio_index(folio));

		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio) ||
		    folio_test_fscache(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE) {
				folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
				folio_wait_fscache(folio);
#endif
			}
			folio_put(folio);
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

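	/* For cyclic writeback, sweep from writeback_index to the end of the
	 * file, then wrap round to cover the start if there's still quota
	 * left in nr_to_write.
	 */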
	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;

	/* We mustn't change folio->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

/*
 * Prune the keys cached for writeback.  The caller must hold vnode->wb_lock.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_folio(struct folio *folio)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio->index);

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

		bv[0].bv_page = &folio->page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

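		/* Store the data synchronously, with laundering set so that
		 * afs_pages_written_back() is skipped - the folio is about to
		 * be invalidated anyway.
		 */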
		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}

/*
 * Deal with the completion of writing the data to the cache.
 */
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				    bool was_async)
{
	struct afs_vnode *vnode = priv;

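	/* -ENOBUFS just means the cache declined to store the data; any
	 * other error leaves the cache contents suspect, so invalidate them.
	 */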
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		afs_invalidate_cache(vnode, 0);
}

/*
 * Save the write to the cache also.
 */
static void afs_write_to_cache(struct afs_vnode *vnode,
			       loff_t start, size_t len, loff_t i_size,
			       bool caching)
{
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->vfs_inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
}