Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
1da177e4 LT |
2 | /* |
3 | * linux/fs/nfs/write.c | |
4 | * | |
7c85d900 | 5 | * Write file data over NFS. |
1da177e4 LT |
6 | * |
7 | * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> | |
8 | */ | |
9 | ||
1da177e4 LT |
10 | #include <linux/types.h> |
11 | #include <linux/slab.h> | |
12 | #include <linux/mm.h> | |
13 | #include <linux/pagemap.h> | |
14 | #include <linux/file.h> | |
1da177e4 | 15 | #include <linux/writeback.h> |
89a09141 | 16 | #include <linux/swap.h> |
074cc1de | 17 | #include <linux/migrate.h> |
1da177e4 LT |
18 | |
19 | #include <linux/sunrpc/clnt.h> | |
20 | #include <linux/nfs_fs.h> | |
21 | #include <linux/nfs_mount.h> | |
22 | #include <linux/nfs_page.h> | |
3fcfab16 | 23 | #include <linux/backing-dev.h> |
afeacc8c | 24 | #include <linux/export.h> |
af7cf057 TM |
25 | #include <linux/freezer.h> |
26 | #include <linux/wait.h> | |
1eb5d98f | 27 | #include <linux/iversion.h> |
5970e15d | 28 | #include <linux/filelock.h> |
3fcfab16 | 29 | |
7c0f6ba6 | 30 | #include <linux/uaccess.h> |
875bc3fb | 31 | #include <linux/sched/mm.h> |
1da177e4 LT |
32 | |
33 | #include "delegation.h" | |
49a70f27 | 34 | #include "internal.h" |
91d5b470 | 35 | #include "iostat.h" |
def6ed7e | 36 | #include "nfs4_fs.h" |
074cc1de | 37 | #include "fscache.h" |
94ad1c80 | 38 | #include "pnfs.h" |
1da177e4 | 39 | |
f4ce1299 TM |
40 | #include "nfstrace.h" |
41 | ||
1da177e4 LT |
42 | #define NFSDBG_FACILITY NFSDBG_PAGECACHE |
43 | ||
44 | #define MIN_POOL_WRITE (32) | |
45 | #define MIN_POOL_COMMIT (4) | |
46 | ||
919e3bd9 TM |
47 | struct nfs_io_completion { |
48 | void (*complete)(void *data); | |
49 | void *data; | |
50 | struct kref refcount; | |
51 | }; | |
52 | ||
1da177e4 LT |
53 | /* |
54 | * Local function declarations | |
55 | */ | |
f8512ad0 | 56 | static void nfs_redirty_request(struct nfs_page *req); |
788e7a89 | 57 | static const struct rpc_call_ops nfs_commit_ops; |
061ae2ed | 58 | static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; |
f453a54a | 59 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops; |
4a0de55c | 60 | static const struct nfs_rw_ops nfs_rw_write_ops; |
06c9fdf3 | 61 | static void nfs_inode_remove_request(struct nfs_page *req); |
b193a78d TM |
62 | static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, |
63 | struct nfs_page *req); | |
02d1426c WAA |
64 | static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, |
65 | struct inode *inode); | |
3a3908c8 TM |
66 | static struct nfs_page * |
67 | nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, | |
0c493b5c | 68 | struct folio *folio); |
1da177e4 | 69 | |
e18b890b | 70 | static struct kmem_cache *nfs_wdata_cachep; |
3feb2d49 | 71 | static mempool_t *nfs_wdata_mempool; |
0b7c0153 | 72 | static struct kmem_cache *nfs_cdata_cachep; |
1da177e4 LT |
73 | static mempool_t *nfs_commit_mempool; |
74 | ||
515dcdcd | 75 | struct nfs_commit_data *nfs_commitdata_alloc(void) |
1da177e4 | 76 | { |
518662e0 | 77 | struct nfs_commit_data *p; |
40859d7e | 78 | |
515dcdcd TM |
79 | p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask()); |
80 | if (!p) { | |
518662e0 | 81 | p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); |
518662e0 N |
82 | if (!p) |
83 | return NULL; | |
515dcdcd | 84 | memset(p, 0, sizeof(*p)); |
1da177e4 | 85 | } |
518662e0 | 86 | INIT_LIST_HEAD(&p->pages); |
1da177e4 LT |
87 | return p; |
88 | } | |
e0c2b380 | 89 | EXPORT_SYMBOL_GPL(nfs_commitdata_alloc); |
1da177e4 | 90 | |
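nfs_commitdata_alloc() tries the slab cache first and drops to the reserved mempool only when that fails; the fallback object must be zeroed by hand because a mempool recycles returned objects rather than handing out fresh zeroed memory. A minimal userspace sketch of the same try-primary-then-reserve shape (`commit_data`, `emergency_pool` and the helper names are illustrative, not kernel APIs, and the sketch omits the locking a real pool needs):

```c
#include <stdlib.h>
#include <string.h>

struct commit_data { char payload[64]; };

/* One statically reserved object standing in for the mempool. */
static struct commit_data emergency_pool;
static int emergency_in_use;

static struct commit_data *commitdata_alloc(void)
{
	/* Primary path: ordinary allocation, returned zeroed. */
	struct commit_data *p = calloc(1, sizeof(*p));

	if (!p) {
		/* Fallback path: hand out the reserved object... */
		if (emergency_in_use)
			return NULL;
		emergency_in_use = 1;
		p = &emergency_pool;
		/* ...and zero it by hand, as the memset() above does,
		 * because a recycled object holds stale contents. */
		memset(p, 0, sizeof(*p));
	}
	return p;
}

static void commitdata_free(struct commit_data *p)
{
	if (p == &emergency_pool)
		emergency_in_use = 0;
	else
		free(p);
}

int main(void)
{
	struct commit_data *p = commitdata_alloc();

	commitdata_free(p);
	return 0;
}
```

Keeping a guaranteed reserve is what lets commits make forward progress when the system is too low on memory for an ordinary allocation to succeed.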
0b7c0153 | 91 | void nfs_commit_free(struct nfs_commit_data *p) |
1da177e4 LT |
92 | { |
93 | mempool_free(p, nfs_commit_mempool); | |
94 | } | |
e0c2b380 | 95 | EXPORT_SYMBOL_GPL(nfs_commit_free); |
1da177e4 | 96 | |
1e7f3a48 | 97 | static struct nfs_pgio_header *nfs_writehdr_alloc(void) |
3feb2d49 | 98 | { |
0bae835b | 99 | struct nfs_pgio_header *p; |
cd841605 | 100 | |
0bae835b TM |
101 | p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask()); |
102 | if (!p) { | |
103 | p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT); | |
104 | if (!p) | |
105 | return NULL; | |
106 | memset(p, 0, sizeof(*p)); | |
107 | } | |
237f8306 | 108 | p->rw_mode = FMODE_WRITE; |
3feb2d49 TM |
109 | return p; |
110 | } | |
6c75dc0d | 111 | |
1e7f3a48 | 112 | static void nfs_writehdr_free(struct nfs_pgio_header *hdr) |
3feb2d49 | 113 | { |
1e7f3a48 | 114 | mempool_free(hdr, nfs_wdata_mempool); |
3feb2d49 | 115 | } |
1da177e4 | 116 | |
919e3bd9 TM |
117 | static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags) |
118 | { | |
119 | return kmalloc(sizeof(struct nfs_io_completion), gfp_flags); | |
120 | } | |
121 | ||
122 | static void nfs_io_completion_init(struct nfs_io_completion *ioc, | |
123 | void (*complete)(void *), void *data) | |
124 | { | |
125 | ioc->complete = complete; | |
126 | ioc->data = data; | |
127 | kref_init(&ioc->refcount); | |
128 | } | |
129 | ||
130 | static void nfs_io_completion_release(struct kref *kref) | |
131 | { | |
132 | struct nfs_io_completion *ioc = container_of(kref, | |
133 | struct nfs_io_completion, refcount); | |
134 | ioc->complete(ioc->data); | |
135 | kfree(ioc); | |
136 | } | |
137 | ||
138 | static void nfs_io_completion_get(struct nfs_io_completion *ioc) | |
139 | { | |
140 | if (ioc != NULL) | |
141 | kref_get(&ioc->refcount); | |
142 | } | |
143 | ||
144 | static void nfs_io_completion_put(struct nfs_io_completion *ioc) | |
145 | { | |
146 | if (ioc != NULL) | |
147 | kref_put(&ioc->refcount, nfs_io_completion_release); | |
148 | } | |
149 | ||
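struct nfs_io_completion above ties a callback to a reference count so the deferred commit fires exactly once, after the last holder drops its reference. A self-contained C11 sketch of the same count-reaches-zero-runs-callback pattern, assuming a heap-allocated completion (the kernel's kref_put() performs the equivalent atomic decrement-and-test):

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_completion {
	void (*complete)(void *data);
	void *data;
	atomic_int refcount;
};

static void ioc_init(struct io_completion *ioc,
		     void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	atomic_init(&ioc->refcount, 1);		/* creator's reference */
}

static void ioc_get(struct io_completion *ioc)
{
	atomic_fetch_add(&ioc->refcount, 1);
}

static void ioc_put(struct io_completion *ioc)
{
	/* The final put runs the completion, like kref_put() above. */
	if (atomic_fetch_sub(&ioc->refcount, 1) == 1) {
		ioc->complete(ioc->data);
		free(ioc);
	}
}

static void done(void *data)
{
	printf("batch complete: %s\n", (char *)data);
}

int main(void)
{
	struct io_completion *ioc = malloc(sizeof(*ioc));

	ioc_init(ioc, done, "all writes finished");
	ioc_get(ioc);	/* a queued write takes a reference   */
	ioc_put(ioc);	/* ...and drops it when it completes  */
	ioc_put(ioc);	/* creator's put: count hits 0, done() fires */
	return 0;
}
```

In the NFS code, each write takes a reference through the .init_hdr hook (nfs_async_write_init) and drops it in nfs_write_completion, so nfs_io_completion_commit() runs only after every write in the batch has finished.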
e00ed89d TM |
150 | static void |
151 | nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode) | |
152 | { | |
153 | if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) { | |
154 | kref_get(&req->wb_kref); | |
155 | atomic_long_inc(&NFS_I(inode)->nrequests); | |
156 | } | |
157 | } | |
158 | ||
159 | static int | |
160 | nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) | |
161 | { | |
162 | int ret; | |
163 | ||
164 | if (!test_bit(PG_REMOVE, &req->wb_flags)) | |
165 | return 0; | |
166 | ret = nfs_page_group_lock(req); | |
167 | if (ret) | |
168 | return ret; | |
169 | if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) | |
170 | nfs_page_set_inode_ref(req, inode); | |
171 | nfs_page_group_unlock(req); | |
172 | return 0; | |
173 | } | |
174 | ||
0c493b5c | 175 | static struct nfs_page *nfs_folio_private_request(struct folio *folio) |
bd37d6fc | 176 | { |
0c493b5c | 177 | return folio_get_private(folio); |
bd37d6fc TM |
178 | } |
179 | ||
0c493b5c TM |
180 | /** |
181 | * nfs_folio_find_private_request - find head request associated with a folio | |
182 | * @folio: pointer to folio | |
84d3a9a9 WAA |
183 | * |
184 | * must be called while holding the inode lock. | |
185 | * | |
186 | * returns matching head request with reference held, or NULL if not found. | |
187 | */ | |
0c493b5c | 188 | static struct nfs_page *nfs_folio_find_private_request(struct folio *folio) |
277459d2 | 189 | { |
0c493b5c | 190 | struct address_space *mapping = folio_file_mapping(folio); |
bd37d6fc | 191 | struct nfs_page *req; |
277459d2 | 192 | |
0c493b5c | 193 | if (!folio_test_private(folio)) |
b30d2f04 | 194 | return NULL; |
4b9bb25b | 195 | spin_lock(&mapping->private_lock); |
0c493b5c | 196 | req = nfs_folio_private_request(folio); |
84d3a9a9 WAA |
197 | if (req) { |
198 | WARN_ON_ONCE(req->wb_head != req); | |
29418aa4 | 199 | kref_get(&req->wb_kref); |
84d3a9a9 | 200 | } |
4b9bb25b | 201 | spin_unlock(&mapping->private_lock); |
b30d2f04 TM |
202 | return req; |
203 | } | |
29418aa4 | 204 | |
0c493b5c | 205 | static struct nfs_page *nfs_folio_find_swap_request(struct folio *folio) |
b30d2f04 | 206 | { |
0c493b5c | 207 | struct inode *inode = folio_file_mapping(folio)->host; |
b30d2f04 TM |
208 | struct nfs_inode *nfsi = NFS_I(inode); |
209 | struct nfs_page *req = NULL; | |
0c493b5c | 210 | if (!folio_test_swapcache(folio)) |
b30d2f04 | 211 | return NULL; |
e824f99a | 212 | mutex_lock(&nfsi->commit_mutex); |
0c493b5c | 213 | if (folio_test_swapcache(folio)) { |
b30d2f04 | 214 | req = nfs_page_search_commits_for_head_request_locked(nfsi, |
0c493b5c | 215 | folio); |
b30d2f04 TM |
216 | if (req) { |
217 | WARN_ON_ONCE(req->wb_head != req); | |
218 | kref_get(&req->wb_kref); | |
219 | } | |
220 | } | |
e824f99a | 221 | mutex_unlock(&nfsi->commit_mutex); |
277459d2 TM |
222 | return req; |
223 | } | |
224 | ||
0c493b5c TM |
225 | /** |
226 | * nfs_folio_find_head_request - find head request associated with a folio | |
227 | * @folio: pointer to folio | |
84d3a9a9 WAA |
228 | * |
229 | * returns matching head request with reference held, or NULL if not found. | |
230 | */ | |
0c493b5c | 231 | static struct nfs_page *nfs_folio_find_head_request(struct folio *folio) |
277459d2 | 232 | { |
b30d2f04 | 233 | struct nfs_page *req; |
277459d2 | 234 | |
0c493b5c | 235 | req = nfs_folio_find_private_request(folio); |
b30d2f04 | 236 | if (!req) |
0c493b5c | 237 | req = nfs_folio_find_swap_request(folio); |
277459d2 TM |
238 | return req; |
239 | } | |
240 | ||
0c493b5c | 241 | static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio) |
e00ed89d | 242 | { |
0c493b5c | 243 | struct inode *inode = folio_file_mapping(folio)->host; |
e00ed89d TM |
244 | struct nfs_page *req, *head; |
245 | int ret; | |
246 | ||
247 | for (;;) { | |
0c493b5c | 248 | req = nfs_folio_find_head_request(folio); |
e00ed89d TM |
249 | if (!req) |
250 | return req; | |
251 | head = nfs_page_group_lock_head(req); | |
252 | if (head != req) | |
253 | nfs_release_request(req); | |
254 | if (IS_ERR(head)) | |
255 | return head; | |
256 | ret = nfs_cancel_remove_inode(head, inode); | |
257 | if (ret < 0) { | |
258 | nfs_unlock_and_release_request(head); | |
259 | return ERR_PTR(ret); | |
260 | } | |
261 | /* Ensure that nobody removed the request before we locked it */ | |
0c493b5c | 262 | if (head == nfs_folio_private_request(folio)) |
e00ed89d | 263 | break; |
0c493b5c | 264 | if (folio_test_swapcache(folio)) |
e00ed89d TM |
265 | break; |
266 | nfs_unlock_and_release_request(head); | |
267 | } | |
268 | return head; | |
269 | } | |
270 | ||
1da177e4 | 271 | /* Adjust the file length if we're writing beyond the end */ |
0c493b5c TM |
272 | static void nfs_grow_file(struct folio *folio, unsigned int offset, |
273 | unsigned int count) | |
1da177e4 | 274 | { |
0c493b5c | 275 | struct inode *inode = folio_file_mapping(folio)->host; |
a3d01454 TM |
276 | loff_t end, i_size; |
277 | pgoff_t end_index; | |
1da177e4 | 278 | |
a3d01454 TM |
279 | spin_lock(&inode->i_lock); |
280 | i_size = i_size_read(inode); | |
0c493b5c TM |
281 | end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio); |
282 | if (i_size > 0 && folio_index(folio) < end_index) | |
a3d01454 | 283 | goto out; |
0c493b5c | 284 | end = folio_file_pos(folio) + (loff_t)offset + (loff_t)count; |
1da177e4 | 285 | if (i_size >= end) |
a3d01454 | 286 | goto out; |
110cb2d2 | 287 | trace_nfs_size_grow(inode, end); |
1da177e4 | 288 | i_size_write(inode, end); |
f6cdfa6d | 289 | NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE; |
a3d01454 TM |
290 | nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); |
291 | out: | |
292 | spin_unlock(&inode->i_lock); | |
a6b5a28e | 293 | nfs_fscache_invalidate(inode, 0); |
1da177e4 LT |
294 | } |
295 | ||
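In nfs_grow_file() above, end_index is the page index of the first page of the folio containing EOF: shifting (i_size - 1) right by folio_shift() (PAGE_SHIFT plus the folio order) gives a folio-granularity index, and shifting left by folio_order() converts that back to the page granularity that folio_index() reports. A worked check with assumed numbers (4 KiB pages, order-2 folios; PAGE_SHIFT here is a stand-in for the kernel constant):

```c
#include <assert.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned int order = 2;			  /* 16 KiB folio   */
	unsigned int shift = PAGE_SHIFT + order;  /* folio_shift()  */
	long long i_size = 40000;		  /* EOF in folio 2 */

	/* Page index of the first page of the folio holding EOF. */
	long long end_index = ((i_size - 1) >> shift) << order;

	assert(end_index == 8);	/* folio 2 starts at page index 8 */

	/* A folio with folio_index() < 8 lies wholly below EOF, so a
	 * write into it can never extend the file and nfs_grow_file()
	 * takes the early "goto out". */
	return 0;
}
```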
a301b777 | 296 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ |
d2ceb7e5 | 297 | static void nfs_set_pageerror(struct address_space *mapping) |
a301b777 | 298 | { |
0df68ced TM |
299 | struct inode *inode = mapping->host; |
300 | ||
d2ceb7e5 | 301 | nfs_zap_mapping(mapping->host, mapping); |
0df68ced TM |
302 | /* Force file size revalidation */ |
303 | spin_lock(&inode->i_lock); | |
ac46b3d7 | 304 | nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED | |
88a6099f | 305 | NFS_INO_INVALID_CHANGE | |
ac46b3d7 | 306 | NFS_INO_INVALID_SIZE); |
0df68ced | 307 | spin_unlock(&inode->i_lock); |
a301b777 TM |
308 | } |
309 | ||
0c493b5c | 310 | static void nfs_mapping_set_error(struct folio *folio, int error) |
6fbda89b | 311 | { |
0c493b5c | 312 | struct address_space *mapping = folio_file_mapping(folio); |
b8946d7b | 313 | |
0c493b5c | 314 | folio_set_error(folio); |
6c984083 TM |
315 | filemap_set_wb_err(mapping, error); |
316 | if (mapping->host) | |
317 | errseq_set(&mapping->host->i_sb->s_wb_err, | |
318 | error == -ENOSPC ? -ENOSPC : -EIO); | |
b8946d7b | 319 | nfs_set_pageerror(mapping); |
6fbda89b TM |
320 | } |
321 | ||
d72ddcba WAA |
322 | /* |
323 | * nfs_page_group_search_locked | |
324 | * @head - head request of page group | |
325 | * @page_offset - offset into page | |
326 | * | |
327 | * Search page group with head @head to find a request that contains the | |
328 | * page offset @page_offset. | |
329 | * | |
330 | * Returns a pointer to the first matching nfs request, or NULL if no | |
331 | * match is found. | |
332 | * | |
333 | * Must be called with the page group lock held | |
334 | */ | |
335 | static struct nfs_page * | |
336 | nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) | |
337 | { | |
338 | struct nfs_page *req; | |
339 | ||
d72ddcba WAA |
340 | req = head; |
341 | do { | |
342 | if (page_offset >= req->wb_pgbase && | |
343 | page_offset < (req->wb_pgbase + req->wb_bytes)) | |
344 | return req; | |
345 | ||
346 | req = req->wb_this_page; | |
347 | } while (req != head); | |
348 | ||
349 | return NULL; | |
350 | } | |
351 | ||
352 | /* | |
353 | * nfs_page_group_covers_page | |
354 | * @head - head request of page group | |
355 | * | |
356 | * Return true if the page group with head @head covers the whole page, | |
357 | * returns false otherwise | |
358 | */ | |
359 | static bool nfs_page_group_covers_page(struct nfs_page *req) | |
360 | { | |
0c493b5c | 361 | unsigned int len = nfs_folio_length(nfs_page_to_folio(req)); |
d72ddcba WAA |
362 | struct nfs_page *tmp; |
363 | unsigned int pos = 0; | |
d72ddcba | 364 | |
1344b7ea | 365 | nfs_page_group_lock(req); |
d72ddcba | 366 | |
7e8a30f8 | 367 | for (;;) { |
d72ddcba | 368 | tmp = nfs_page_group_search_locked(req->wb_head, pos); |
7e8a30f8 TM |
369 | if (!tmp) |
370 | break; | |
371 | pos = tmp->wb_pgbase + tmp->wb_bytes; | |
372 | } | |
d72ddcba WAA |
373 | |
374 | nfs_page_group_unlock(req); | |
7e8a30f8 | 375 | return pos >= len; |
d72ddcba WAA |
376 | } |
377 | ||
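Subrequests of a page group sit on a circular list linked by wb_this_page, each covering [wb_pgbase, wb_pgbase + wb_bytes) within the page. The coverage check above walks a cursor up from offset 0, hopping to the end of whichever member contains it; the page is fully covered exactly when the cursor reaches the page length. A standalone sketch of that walk:

```c
#include <stdbool.h>
#include <stddef.h>

struct subreq {
	unsigned int pgbase, bytes;
	struct subreq *next;	/* circular, like wb_this_page */
};

static struct subreq *search(struct subreq *head, unsigned int off)
{
	struct subreq *r = head;

	do {
		if (off >= r->pgbase && off < r->pgbase + r->bytes)
			return r;
		r = r->next;
	} while (r != head);
	return NULL;
}

static bool group_covers(struct subreq *head, unsigned int len)
{
	unsigned int pos = 0;
	struct subreq *r;

	/* Hop from one covering subrequest to the next; any gap
	 * strands pos short of len and ends the walk. */
	while ((r = search(head, pos)) != NULL)
		pos = r->pgbase + r->bytes;
	return pos >= len;
}

int main(void)
{
	struct subreq a = { .pgbase = 0, .bytes = 2048 };
	struct subreq b = { .pgbase = 2048, .bytes = 2048 };

	a.next = &b;
	b.next = &a;	/* circular group of two */

	return group_covers(&a, 4096) ? 0 : 1;	/* covered: exit 0 */
}
```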
1da177e4 LT |
378 | /* We can set the PG_uptodate flag if we see that a write request |
379 | * covers the full page. | |
380 | */ | |
d72ddcba | 381 | static void nfs_mark_uptodate(struct nfs_page *req) |
1da177e4 | 382 | { |
0c493b5c TM |
383 | struct folio *folio = nfs_page_to_folio(req); |
384 | ||
385 | if (folio_test_uptodate(folio)) | |
1da177e4 | 386 | return; |
d72ddcba | 387 | if (!nfs_page_group_covers_page(req)) |
1da177e4 | 388 | return; |
0c493b5c | 389 | folio_mark_uptodate(folio); |
1da177e4 LT |
390 | } |
391 | ||
1da177e4 LT |
392 | static int wb_priority(struct writeback_control *wbc) |
393 | { | |
e87b4c7a | 394 | int ret = 0; |
cca588d6 | 395 | |
e87b4c7a N |
396 | if (wbc->sync_mode == WB_SYNC_ALL) |
397 | ret = FLUSH_COND_STABLE; | |
e87b4c7a | 398 | return ret; |
1da177e4 LT |
399 | } |
400 | ||
89a09141 PZ |
401 | /* |
402 | * NFS congestion control | |
403 | */ | |
404 | ||
405 | int nfs_congestion_kb; | |
406 | ||
407 | #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10)) | |
408 | #define NFS_CONGESTION_OFF_THRESH \ | |
409 | (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) | |
410 | ||
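The thresholds convert nfs_congestion_kb (tuned in KiB) into a page count: right-shifting by PAGE_SHIFT - 10 divides by the number of KiB per page, and the OFF threshold sits a quarter below the ON threshold, so writeback must drain noticeably before the congested flag clears (hysteresis). Worked numbers, assuming 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	long nfs_congestion_kb = 65536;	/* e.g. 64 MiB */

	/* KiB -> pages: divide by 4 on a 4 KiB-page machine. */
	long on  = nfs_congestion_kb >> (PAGE_SHIFT - 10);
	long off = on - (on >> 2);	/* ON minus 25% */

	printf("congest at %ld pages, clear at %ld pages\n", on, off);
	/* prints: congest at 16384 pages, clear at 12288 pages */
	return 0;
}
```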
0c493b5c | 411 | static void nfs_folio_set_writeback(struct folio *folio) |
89a09141 | 412 | { |
0c493b5c | 413 | struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host); |
89a09141 | 414 | |
0c493b5c TM |
415 | folio_start_writeback(folio); |
416 | if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) | |
6df25e58 | 417 | nfss->write_congested = 1; |
89a09141 PZ |
418 | } |
419 | ||
0c493b5c | 420 | static void nfs_folio_end_writeback(struct folio *folio) |
89a09141 | 421 | { |
0c493b5c | 422 | struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host); |
89a09141 | 423 | |
0c493b5c TM |
424 | folio_end_writeback(folio); |
425 | if (atomic_long_dec_return(&nfss->writeback) < | |
426 | NFS_CONGESTION_OFF_THRESH) | |
6df25e58 | 427 | nfss->write_congested = 0; |
89a09141 PZ |
428 | } |
429 | ||
0c493b5c TM |
430 | static void nfs_page_end_writeback(struct nfs_page *req) |
431 | { | |
432 | if (nfs_page_group_sync_on_bit(req, PG_WB_END)) { | |
433 | nfs_unlock_request(req); | |
434 | nfs_folio_end_writeback(nfs_page_to_folio(req)); | |
435 | } else | |
436 | nfs_unlock_request(req); | |
437 | } | |
438 | ||
d4581383 WAA |
439 | /* |
440 | * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests | |
441 | * | |
442 | * @destroy_list - request list (using wb_this_page) terminated by @old_head | |
443 | * @old_head - the old head of the list | |
444 | * | |
445 | * All subrequests must be locked and removed from all lists, so at this point | |
446 | * they are only "active" in this function, and possibly in nfs_wait_on_request | |
447 | * with a reference held by some other context. | |
448 | */ | |
449 | static void | |
450 | nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, | |
b66aaa8d TM |
451 | struct nfs_page *old_head, |
452 | struct inode *inode) | |
d4581383 WAA |
453 | { |
454 | while (destroy_list) { | |
455 | struct nfs_page *subreq = destroy_list; | |
456 | ||
457 | destroy_list = (subreq->wb_this_page == old_head) ? | |
458 | NULL : subreq->wb_this_page; | |
459 | ||
08ca8b21 TM |
460 | /* Note: lock subreq in order to change subreq->wb_head */ |
461 | nfs_page_set_headlock(subreq); | |
d4581383 WAA |
462 | WARN_ON_ONCE(old_head != subreq->wb_head); |
463 | ||
464 | /* make sure old group is not used */ | |
d4581383 | 465 | subreq->wb_this_page = subreq; |
08ca8b21 | 466 | subreq->wb_head = subreq; |
d4581383 | 467 | |
902a4c00 TM |
468 | clear_bit(PG_REMOVE, &subreq->wb_flags); |
469 | ||
5b2b5187 TM |
470 | /* Note: races with nfs_page_group_destroy() */ |
471 | if (!kref_read(&subreq->wb_kref)) { | |
5b2b5187 | 472 | /* Check if we raced with nfs_page_group_destroy() */ |
08ca8b21 TM |
473 | if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { |
474 | nfs_page_clear_headlock(subreq); | |
5b2b5187 | 475 | nfs_free_request(subreq); |
08ca8b21 TM |
476 | } else |
477 | nfs_page_clear_headlock(subreq); | |
5b2b5187 TM |
478 | continue; |
479 | } | |
08ca8b21 | 480 | nfs_page_clear_headlock(subreq); |
d4581383 | 481 | |
add42de3 | 482 | nfs_release_request(old_head); |
5b2b5187 TM |
483 | |
484 | if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { | |
485 | nfs_release_request(subreq); | |
a6b6d5b8 | 486 | atomic_long_dec(&NFS_I(inode)->nrequests); |
d4581383 | 487 | } |
5b2b5187 | 488 | |
5b2b5187 TM |
489 | /* subreq is now totally disconnected from page group or any |
490 | * write / commit lists. last chance to wake any waiters */ | |
491 | nfs_unlock_and_release_request(subreq); | |
d4581383 WAA |
492 | } |
493 | } | |
494 | ||
495 | /* | |
e00ed89d TM |
496 | * nfs_join_page_group - destroy subrequests of the head req |
497 | * @head: the page used to lookup the "page group" of nfs_page structures | |
498 | * @inode: Inode to which the request belongs. | |
d4581383 WAA |
499 | * |
500 | * This function joins all sub requests to the head request by first | |
501 | * locking all requests in the group, cancelling any pending operations | |
502 | * and finally updating the head request to cover the whole range covered by | |
503 | * the (former) group. All subrequests are removed from any write or commit | |
504 | * lists, unlinked from the group and destroyed. | |
d4581383 | 505 | */ |
b193a78d TM |
506 | void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, |
507 | struct inode *inode) | |
e261f51f | 508 | { |
e00ed89d | 509 | struct nfs_page *subreq; |
d4581383 | 510 | struct nfs_page *destroy_list = NULL; |
a62f8e3b | 511 | unsigned int pgbase, off, bytes; |
a62f8e3b TM |
512 | |
513 | pgbase = head->wb_pgbase; | |
514 | bytes = head->wb_bytes; | |
515 | off = head->wb_offset; | |
a0e265bc TM |
516 | for (subreq = head->wb_this_page; subreq != head; |
517 | subreq = subreq->wb_this_page) { | |
a62f8e3b TM |
518 | /* Subrequests should always form a contiguous range */ |
519 | if (pgbase > subreq->wb_pgbase) { | |
520 | off -= pgbase - subreq->wb_pgbase; | |
521 | bytes += pgbase - subreq->wb_pgbase; | |
522 | pgbase = subreq->wb_pgbase; | |
309a1d65 | 523 | } |
a62f8e3b TM |
524 | bytes = max(subreq->wb_pgbase + subreq->wb_bytes |
525 | - pgbase, bytes); | |
a0e265bc | 526 | } |
d4581383 | 527 | |
a62f8e3b TM |
528 | /* Set the head request's range to cover the former page group */ |
529 | head->wb_pgbase = pgbase; | |
530 | head->wb_bytes = bytes; | |
531 | head->wb_offset = off; | |
532 | ||
d4581383 WAA |
533 | /* Now that all requests are locked, make sure they aren't on any list. |
534 | * Commit list removal accounting is done after locks are dropped */ | |
535 | subreq = head; | |
536 | do { | |
b193a78d | 537 | nfs_clear_request_commit(cinfo, subreq); |
d4581383 WAA |
538 | subreq = subreq->wb_this_page; |
539 | } while (subreq != head); | |
540 | ||
541 | /* unlink subrequests from head, destroy them later */ | |
542 | if (head->wb_this_page != head) { | |
543 | /* destroy list will be terminated by head */ | |
544 | destroy_list = head->wb_this_page; | |
545 | head->wb_this_page = head; | |
e261f51f | 546 | } |
d4581383 | 547 | |
e00ed89d TM |
548 | nfs_destroy_unlinked_subrequests(destroy_list, head, inode); |
549 | } | |
b66aaa8d | 550 | |
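The loop at the top of nfs_join_page_group() widens the head's (pgbase, offset, bytes) triple until it spans every subrequest: when a subrequest starts earlier, pgbase and offset slide back while bytes grows by the same delta; when one ends later, bytes is raised to reach that end (the max() in the code above). The same arithmetic, exercised in isolation with hypothetical ranges:

```c
#include <assert.h>

struct range { unsigned int pgbase, off, bytes; };

static void merge(struct range *head, const struct range *sub)
{
	if (head->pgbase > sub->pgbase) {
		/* Subrequest starts earlier: slide the start back. */
		head->off -= head->pgbase - sub->pgbase;
		head->bytes += head->pgbase - sub->pgbase;
		head->pgbase = sub->pgbase;
	}
	/* Raise the end if the subrequest reaches further. */
	if (sub->pgbase + sub->bytes - head->pgbase > head->bytes)
		head->bytes = sub->pgbase + sub->bytes - head->pgbase;
}

int main(void)
{
	struct range head = { .pgbase = 1024, .off = 1024, .bytes = 512 };
	struct range a = { .pgbase = 512,  .bytes = 256 };  /* earlier */
	struct range b = { .pgbase = 2048, .bytes = 1024 }; /* later   */

	merge(&head, &a);
	merge(&head, &b);

	/* Head now covers [512, 3072) within the page. */
	assert(head.pgbase == 512 && head.off == 512);
	assert(head.bytes == 2560);
	return 0;
}
```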
e00ed89d TM |
551 | /* |
552 | * nfs_lock_and_join_requests - join all subreqs to the head req | |
0c493b5c | 553 | * @folio: the folio used to lookup the "page group" of nfs_page structures |
e00ed89d TM |
554 | * |
555 | * This function joins all sub requests to the head request by first | |
556 | * locking all requests in the group, cancelling any pending operations | |
557 | * and finally updating the head request to cover the whole range covered by | |
558 | * the (former) group. All subrequests are removed from any write or commit | |
559 | * lists, unlinked from the group and destroyed. | |
560 | * | |
561 | * Returns a locked, referenced pointer to the head request - which after | |
562 | * this call is guaranteed to be the only request associated with the page. | |
0c493b5c | 563 | * Returns NULL if no requests are found for @folio, or an ERR_PTR if an |
e00ed89d TM |
564 | * error was encountered. |
565 | */ | |
0c493b5c | 566 | static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio) |
e00ed89d | 567 | { |
0c493b5c | 568 | struct inode *inode = folio_file_mapping(folio)->host; |
e00ed89d | 569 | struct nfs_page *head; |
b193a78d | 570 | struct nfs_commit_info cinfo; |
e00ed89d | 571 | int ret; |
d4581383 | 572 | |
b193a78d | 573 | nfs_init_cinfo_from_inode(&cinfo, inode); |
e00ed89d TM |
574 | /* |
575 | * A reference is taken only on the head request which acts as a | |
576 | * reference to the whole page group - the group will not be destroyed | |
577 | * until the head reference is released. | |
578 | */ | |
0c493b5c | 579 | head = nfs_folio_find_and_lock_request(folio); |
e00ed89d TM |
580 | if (IS_ERR_OR_NULL(head)) |
581 | return head; | |
d4581383 | 582 | |
e00ed89d TM |
583 | /* lock each request in the page group */ |
584 | ret = nfs_page_group_lock_subrequests(head); | |
585 | if (ret < 0) { | |
b5bab9bf | 586 | nfs_unlock_and_release_request(head); |
e00ed89d | 587 | return ERR_PTR(ret); |
b5bab9bf TM |
588 | } |
589 | ||
b193a78d | 590 | nfs_join_page_group(head, &cinfo, inode); |
0671d8f1 | 591 | |
e00ed89d | 592 | return head; |
074cc1de TM |
593 | } |
594 | ||
6fbda89b | 595 | static void nfs_write_error(struct nfs_page *req, int error) |
0bcbf039 | 596 | { |
6dd85e83 | 597 | trace_nfs_write_error(nfs_page_to_inode(req), req, error); |
0c493b5c | 598 | nfs_mapping_set_error(nfs_page_to_folio(req), error); |
06c9fdf3 | 599 | nfs_inode_remove_request(req); |
0c493b5c | 600 | nfs_page_end_writeback(req); |
1f84ccdf | 601 | nfs_release_request(req); |
0bcbf039 PT |
602 | } |
603 | ||
074cc1de TM |
604 | /* |
605 | * Find an associated nfs write request, and prepare to flush it out | |
606 | * May return an error if the user signalled nfs_wait_on_request(). | |
607 | */ | |
0c493b5c | 608 | static int nfs_page_async_flush(struct folio *folio, |
c6fd3511 TM |
609 | struct writeback_control *wbc, |
610 | struct nfs_pageio_descriptor *pgio) | |
074cc1de TM |
611 | { |
612 | struct nfs_page *req; | |
613 | int ret = 0; | |
614 | ||
0c493b5c | 615 | req = nfs_lock_and_join_requests(folio); |
074cc1de TM |
616 | if (!req) |
617 | goto out; | |
618 | ret = PTR_ERR(req); | |
619 | if (IS_ERR(req)) | |
620 | goto out; | |
621 | ||
0c493b5c | 622 | nfs_folio_set_writeback(folio); |
deed85e7 | 623 | WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); |
074cc1de | 624 | |
a6598813 | 625 | /* If there is a fatal error that covers this write, just exit */ |
96c41455 TM |
626 | ret = pgio->pg_error; |
627 | if (nfs_error_is_fatal_on_server(ret)) | |
a6598813 TM |
628 | goto out_launder; |
629 | ||
96c41455 | 630 | ret = 0; |
f8512ad0 | 631 | if (!nfs_pageio_add_request(pgio, req)) { |
074cc1de | 632 | ret = pgio->pg_error; |
0bcbf039 | 633 | /* |
c373fff7 | 634 | * Remove the problematic req upon fatal errors on the server |
0bcbf039 | 635 | */ |
c6fd3511 TM |
636 | if (nfs_error_is_fatal_on_server(ret)) |
637 | goto out_launder; | |
638 | if (wbc->sync_mode == WB_SYNC_NONE) | |
639 | ret = AOP_WRITEPAGE_ACTIVATE; | |
0c493b5c | 640 | folio_redirty_for_writepage(wbc, folio); |
d6c843b9 | 641 | nfs_redirty_request(req); |
96c41455 | 642 | pgio->pg_error = 0; |
40f90271 | 643 | } else |
0c493b5c TM |
644 | nfs_add_stats(folio_file_mapping(folio)->host, |
645 | NFSIOS_WRITEPAGES, 1); | |
074cc1de TM |
646 | out: |
647 | return ret; | |
a6598813 | 648 | out_launder: |
6fbda89b | 649 | nfs_write_error(req, ret); |
14bebe3c | 650 | return 0; |
e261f51f TM |
651 | } |
652 | ||
0c493b5c | 653 | static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc, |
c373fff7 | 654 | struct nfs_pageio_descriptor *pgio) |
1da177e4 | 655 | { |
0c493b5c TM |
656 | nfs_pageio_cond_complete(pgio, folio_index(folio)); |
657 | return nfs_page_async_flush(folio, wbc, pgio); | |
f758c885 | 658 | } |
7fe7f848 | 659 | |
f758c885 TM |
660 | /* |
661 | * Write an mmapped page to the server. | |
662 | */ | |
0c493b5c | 663 | static int nfs_writepage_locked(struct folio *folio, |
c373fff7 | 664 | struct writeback_control *wbc) |
f758c885 TM |
665 | { |
666 | struct nfs_pageio_descriptor pgio; | |
0c493b5c | 667 | struct inode *inode = folio_file_mapping(folio)->host; |
f758c885 | 668 | int err; |
49a70f27 | 669 | |
6df25e58 N |
670 | if (wbc->sync_mode == WB_SYNC_NONE && |
671 | NFS_SERVER(inode)->write_congested) | |
672 | return AOP_WRITEPAGE_ACTIVATE; | |
673 | ||
40f90271 | 674 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); |
0c493b5c TM |
675 | nfs_pageio_init_write(&pgio, inode, 0, false, |
676 | &nfs_async_write_completion_ops); | |
677 | err = nfs_do_writepage(folio, wbc, &pgio); | |
96c41455 | 678 | pgio.pg_error = 0; |
f758c885 | 679 | nfs_pageio_complete(&pgio); |
c5e483b7 | 680 | return err; |
4d770ccf TM |
681 | } |
682 | ||
683 | int nfs_writepage(struct page *page, struct writeback_control *wbc) | |
684 | { | |
0c493b5c | 685 | struct folio *folio = page_folio(page); |
f758c885 | 686 | int ret; |
4d770ccf | 687 | |
0c493b5c | 688 | ret = nfs_writepage_locked(folio, wbc); |
96c41455 TM |
689 | if (ret != AOP_WRITEPAGE_ACTIVATE) |
690 | unlock_page(page); | |
f758c885 TM |
691 | return ret; |
692 | } | |
693 | ||
d585bdbe | 694 | static int nfs_writepages_callback(struct folio *folio, |
0c493b5c | 695 | struct writeback_control *wbc, void *data) |
f758c885 TM |
696 | { |
697 | int ret; | |
698 | ||
0c493b5c | 699 | ret = nfs_do_writepage(folio, wbc, data); |
96c41455 | 700 | if (ret != AOP_WRITEPAGE_ACTIVATE) |
d585bdbe | 701 | folio_unlock(folio); |
f758c885 | 702 | return ret; |
1da177e4 LT |
703 | } |
704 | ||
919e3bd9 TM |
705 | static void nfs_io_completion_commit(void *inode) |
706 | { | |
707 | nfs_commit_inode(inode, 0); | |
708 | } | |
709 | ||
1da177e4 LT |
710 | int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) |
711 | { | |
1da177e4 | 712 | struct inode *inode = mapping->host; |
c63c7b05 | 713 | struct nfs_pageio_descriptor pgio; |
ed7bcdb3 TM |
714 | struct nfs_io_completion *ioc = NULL; |
715 | unsigned int mntflags = NFS_SERVER(inode)->flags; | |
716 | int priority = 0; | |
1da177e4 LT |
717 | int err; |
718 | ||
6df25e58 N |
719 | if (wbc->sync_mode == WB_SYNC_NONE && |
720 | NFS_SERVER(inode)->write_congested) | |
721 | return 0; | |
722 | ||
91d5b470 CL |
723 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); |
724 | ||
ed7bcdb3 TM |
725 | if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate || |
726 | wbc->for_background || wbc->for_sync || wbc->for_reclaim) { | |
727 | ioc = nfs_io_completion_alloc(GFP_KERNEL); | |
728 | if (ioc) | |
729 | nfs_io_completion_init(ioc, nfs_io_completion_commit, | |
730 | inode); | |
731 | priority = wb_priority(wbc); | |
732 | } | |
919e3bd9 | 733 | |
c6fd3511 TM |
734 | do { |
735 | nfs_pageio_init_write(&pgio, inode, priority, false, | |
736 | &nfs_async_write_completion_ops); | |
737 | pgio.pg_io_completion = ioc; | |
738 | err = write_cache_pages(mapping, wbc, nfs_writepages_callback, | |
739 | &pgio); | |
740 | pgio.pg_error = 0; | |
741 | nfs_pageio_complete(&pgio); | |
742 | } while (err < 0 && !nfs_error_is_fatal(err)); | |
919e3bd9 | 743 | nfs_io_completion_put(ioc); |
72cb77f4 | 744 | |
f758c885 | 745 | if (err < 0) |
72cb77f4 | 746 | goto out_err; |
c63c7b05 | 747 | return 0; |
72cb77f4 TM |
748 | out_err: |
749 | return err; | |
1da177e4 LT |
750 | } |
751 | ||
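The do/while above restarts a whole write_cache_pages() pass whenever the descriptor reports a transient error, clearing pg_error between attempts and giving up only on fatal errors. The shape of that retry policy in a runnable sketch; error_is_fatal() and the errno values are placeholders for the kernel's nfs_error_is_fatal() classification:

```c
#include <stdio.h>

/* Placeholder classification: treat -5 (EIO-like) as fatal. */
static int error_is_fatal(int err)
{
	return err == -5;
}

/* Stand-in for one full write_cache_pages() pass; here it fails
 * transiently twice before succeeding. */
static int flush_pass(void)
{
	static int attempts;

	return (++attempts < 3) ? -11 /* EAGAIN-like */ : 0;
}

int main(void)
{
	int err;

	/* Same shape as the loop in nfs_writepages(): repeat whole
	 * passes until success or a fatal error. */
	do {
		err = flush_pass();
	} while (err < 0 && !error_is_fatal(err));

	printf("final err = %d\n", err);	/* prints 0 */
	return 0;
}
```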
752 | /* | |
753 | * Insert a write request into an inode | |
754 | */ | |
0c493b5c | 755 | static void nfs_inode_add_request(struct nfs_page *req) |
1da177e4 | 756 | { |
0c493b5c TM |
757 | struct folio *folio = nfs_page_to_folio(req); |
758 | struct address_space *mapping = folio_file_mapping(folio); | |
759 | struct nfs_inode *nfsi = NFS_I(mapping->host); | |
e7d39069 | 760 | |
2bfc6e56 WAA |
761 | WARN_ON_ONCE(req->wb_this_page != req); |
762 | ||
e7d39069 | 763 | /* Lock the request! */ |
7ad84aa9 | 764 | nfs_lock_request(req); |
e7d39069 | 765 | |
29418aa4 MG |
766 | /* |
767 | * Swap-space should not get truncated. Hence no need to plug the race | |
768 | * with invalidate/truncate. | |
769 | */ | |
4b9bb25b | 770 | spin_lock(&mapping->private_lock); |
0c493b5c | 771 | if (likely(!folio_test_swapcache(folio))) { |
29418aa4 | 772 | set_bit(PG_MAPPED, &req->wb_flags); |
0c493b5c TM |
773 | folio_set_private(folio); |
774 | folio->private = req; | |
29418aa4 | 775 | } |
4b9bb25b | 776 | spin_unlock(&mapping->private_lock); |
a6b6d5b8 | 777 | atomic_long_inc(&nfsi->nrequests); |
17089a29 | 778 | /* this is a head request for a page group - mark it as having an |
cb1410c7 WAA |
779 | * extra reference so sub groups can follow suit. |
780 | * This flag also informs pgio layer when to bump nrequests when | |
781 | * adding subrequests. */ | |
17089a29 | 782 | WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); |
c03b4024 | 783 | kref_get(&req->wb_kref); |
1da177e4 LT |
784 | } |
785 | ||
786 | /* | |
89a09141 | 787 | * Remove a write request from an inode |
1da177e4 LT |
788 | */ |
789 | static void nfs_inode_remove_request(struct nfs_page *req) | |
790 | { | |
6a6d4644 SM |
791 | struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req)); |
792 | ||
20633f04 | 793 | if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { |
0c493b5c TM |
794 | struct folio *folio = nfs_page_to_folio(req->wb_head); |
795 | struct address_space *mapping = folio_file_mapping(folio); | |
20633f04 | 796 | |
4b9bb25b | 797 | spin_lock(&mapping->private_lock); |
0c493b5c TM |
798 | if (likely(folio && !folio_test_swapcache(folio))) { |
799 | folio->private = NULL; | |
800 | folio_clear_private(folio); | |
801 | clear_bit(PG_MAPPED, &req->wb_head->wb_flags); | |
20633f04 | 802 | } |
4b9bb25b | 803 | spin_unlock(&mapping->private_lock); |
29418aa4 | 804 | } |
17089a29 | 805 | |
33ea5aaa | 806 | if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) { |
6a6d4644 | 807 | atomic_long_dec(&nfsi->nrequests); |
dd1b2026 | 808 | nfs_release_request(req); |
33ea5aaa | 809 | } |
1da177e4 LT |
810 | } |
811 | ||
0c493b5c | 812 | static void nfs_mark_request_dirty(struct nfs_page *req) |
61822ab5 | 813 | { |
0c493b5c TM |
814 | struct folio *folio = nfs_page_to_folio(req); |
815 | if (folio) | |
816 | filemap_dirty_folio(folio_mapping(folio), folio); | |
61822ab5 TM |
817 | } |
818 | ||
3a3908c8 TM |
819 | /* |
820 | * nfs_page_search_commits_for_head_request_locked | |
821 | * | |
0c493b5c | 822 | * Search through commit lists on @inode for the head request for @folio. |
3a3908c8 TM |
823 | * Must be called while holding the inode (which is cinfo) lock. |
824 | * | |
825 | * Returns the head request if found, or NULL if not found. | |
826 | */ | |
827 | static struct nfs_page * | |
828 | nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, | |
0c493b5c | 829 | struct folio *folio) |
3a3908c8 TM |
830 | { |
831 | struct nfs_page *freq, *t; | |
832 | struct nfs_commit_info cinfo; | |
833 | struct inode *inode = &nfsi->vfs_inode; | |
834 | ||
835 | nfs_init_cinfo_from_inode(&cinfo, inode); | |
836 | ||
837 | /* search through pnfs commit lists */ | |
0c493b5c | 838 | freq = pnfs_search_commit_reqs(inode, &cinfo, folio); |
3a3908c8 TM |
839 | if (freq) |
840 | return freq->wb_head; | |
841 | ||
842 | /* Linearly search the commit list for the correct request */ | |
843 | list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) { | |
0c493b5c | 844 | if (nfs_page_to_folio(freq) == folio) |
3a3908c8 TM |
845 | return freq->wb_head; |
846 | } | |
847 | ||
848 | return NULL; | |
849 | } | |
850 | ||
86d80f97 TM |
851 | /** |
852 | * nfs_request_add_commit_list_locked - add request to a commit list | |
853 | * @req: pointer to a struct nfs_page | |
854 | * @dst: commit list head | |
855 | * @cinfo: holds list lock and accounting info | |
856 | * | |
857 | * This sets the PG_CLEAN bit, updates the cinfo count of | |
858 | * number of outstanding requests requiring a commit as well as | |
859 | * the MM page stats. | |
860 | * | |
e824f99a TM |
861 | * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the |
862 | * nfs_page lock. | |
86d80f97 TM |
863 | */ |
864 | void | |
865 | nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst, | |
866 | struct nfs_commit_info *cinfo) | |
867 | { | |
868 | set_bit(PG_CLEAN, &req->wb_flags); | |
869 | nfs_list_add_request(req, dst); | |
5cb953d4 | 870 | atomic_long_inc(&cinfo->mds->ncommit); |
86d80f97 TM |
871 | } |
872 | EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); | |
873 | ||
8dd37758 TM |
874 | /** |
875 | * nfs_request_add_commit_list - add request to a commit list | |
876 | * @req: pointer to a struct nfs_page | |
ea2cf228 | 877 | * @cinfo: holds list lock and accounting info |
8dd37758 | 878 | * |
ea2cf228 | 879 | * This sets the PG_CLEAN bit, updates the cinfo count of |
8dd37758 TM |
880 | * number of outstanding requests requiring a commit as well as |
881 | * the MM page stats. | |
882 | * | |
ea2cf228 | 883 | * The caller must _not_ hold the cinfo->lock, but must be |
8dd37758 | 884 | * holding the nfs_page lock. |
1da177e4 | 885 | */ |
8dd37758 | 886 | void |
6272dcc6 | 887 | nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo) |
1da177e4 | 888 | { |
e824f99a | 889 | mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); |
6272dcc6 | 890 | nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo); |
e824f99a | 891 | mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); |
0c493b5c | 892 | nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo); |
1da177e4 | 893 | } |
8dd37758 TM |
894 | EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); |
895 | ||
896 | /** | |
897 | * nfs_request_remove_commit_list - Remove request from a commit list | |
898 | * @req: pointer to a nfs_page | |
ea2cf228 | 899 | * @cinfo: holds list lock and accounting info |
8dd37758 | 900 | * |
ea2cf228 | 901 | * This clears the PG_CLEAN bit, and updates the cinfo's count of |
8dd37758 TM |
902 | * number of outstanding requests requiring a commit. |
903 | * It does not update the MM page stats. | |
904 | * | |
ea2cf228 | 905 | * The caller _must_ hold the cinfo->lock and the nfs_page lock. |
8dd37758 TM |
906 | */ |
907 | void | |
ea2cf228 FI |
908 | nfs_request_remove_commit_list(struct nfs_page *req, |
909 | struct nfs_commit_info *cinfo) | |
8dd37758 | 910 | { |
8dd37758 TM |
911 | if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) |
912 | return; | |
913 | nfs_list_remove_request(req); | |
5cb953d4 | 914 | atomic_long_dec(&cinfo->mds->ncommit); |
8dd37758 TM |
915 | } |
916 | EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); | |
917 | ||
ea2cf228 FI |
918 | static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, |
919 | struct inode *inode) | |
920 | { | |
fe238e60 | 921 | cinfo->inode = inode; |
ea2cf228 FI |
922 | cinfo->mds = &NFS_I(inode)->commit_info; |
923 | cinfo->ds = pnfs_get_ds_info(inode); | |
b359f9d0 | 924 | cinfo->dreq = NULL; |
f453a54a | 925 | cinfo->completion_ops = &nfs_commit_completion_ops; |
ea2cf228 FI |
926 | } |
927 | ||
928 | void nfs_init_cinfo(struct nfs_commit_info *cinfo, | |
929 | struct inode *inode, | |
930 | struct nfs_direct_req *dreq) | |
931 | { | |
1763da12 FI |
932 | if (dreq) |
933 | nfs_init_cinfo_from_dreq(cinfo, dreq); | |
934 | else | |
935 | nfs_init_cinfo_from_inode(cinfo, inode); | |
ea2cf228 FI |
936 | } |
937 | EXPORT_SYMBOL_GPL(nfs_init_cinfo); | |
8dd37758 TM |
938 | |
939 | /* | |
940 | * Add a request to the inode's commit list. | |
941 | */ | |
1763da12 | 942 | void |
ea2cf228 | 943 | nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, |
b57ff130 | 944 | struct nfs_commit_info *cinfo, u32 ds_commit_idx) |
8dd37758 | 945 | { |
b57ff130 | 946 | if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx)) |
8dd37758 | 947 | return; |
6272dcc6 | 948 | nfs_request_add_commit_list(req, cinfo); |
8dd37758 | 949 | } |
8e821cad | 950 | |
0c493b5c | 951 | static void nfs_folio_clear_commit(struct folio *folio) |
d6d6dc7c | 952 | { |
0c493b5c TM |
953 | if (folio) { |
954 | long nr = folio_nr_pages(folio); | |
955 | ||
956 | node_stat_mod_folio(folio, NR_WRITEBACK, -nr); | |
957 | wb_stat_mod(&inode_to_bdi(folio_file_mapping(folio)->host)->wb, | |
958 | WB_WRITEBACK, -nr); | |
959 | } | |
d6d6dc7c FI |
960 | } |
961 | ||
b5bab9bf | 962 | /* Called holding the request lock on @req */ |
b193a78d TM |
963 | static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, |
964 | struct nfs_page *req) | |
e468bae9 | 965 | { |
8dd37758 | 966 | if (test_bit(PG_CLEAN, &req->wb_flags)) { |
9fcd5960 TM |
967 | struct nfs_open_context *ctx = nfs_req_openctx(req); |
968 | struct inode *inode = d_inode(ctx->dentry); | |
e468bae9 | 969 | |
e824f99a | 970 | mutex_lock(&NFS_I(inode)->commit_mutex); |
b193a78d TM |
971 | if (!pnfs_clear_request_commit(req, cinfo)) { |
972 | nfs_request_remove_commit_list(req, cinfo); | |
8dd37758 | 973 | } |
e824f99a | 974 | mutex_unlock(&NFS_I(inode)->commit_mutex); |
0c493b5c | 975 | nfs_folio_clear_commit(nfs_page_to_folio(req)); |
e468bae9 | 976 | } |
e468bae9 TM |
977 | } |
978 | ||
d45f60c6 | 979 | int nfs_write_need_commit(struct nfs_pgio_header *hdr) |
8e821cad | 980 | { |
c65e6254 | 981 | if (hdr->verf.committed == NFS_DATA_SYNC) |
d45f60c6 | 982 | return hdr->lseg == NULL; |
c65e6254 | 983 | return hdr->verf.committed != NFS_FILE_SYNC; |
8e821cad TM |
984 | } |
985 | ||
919e3bd9 TM |
986 | static void nfs_async_write_init(struct nfs_pgio_header *hdr) |
987 | { | |
988 | nfs_io_completion_get(hdr->io_completion); | |
989 | } | |
990 | ||
061ae2ed | 991 | static void nfs_write_completion(struct nfs_pgio_header *hdr) |
8e821cad | 992 | { |
ea2cf228 | 993 | struct nfs_commit_info cinfo; |
6c75dc0d FI |
994 | unsigned long bytes = 0; |
995 | ||
996 | if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) | |
997 | goto out; | |
ea2cf228 | 998 | nfs_init_cinfo_from_inode(&cinfo, hdr->inode); |
6c75dc0d FI |
999 | while (!list_empty(&hdr->pages)) { |
1000 | struct nfs_page *req = nfs_list_entry(hdr->pages.next); | |
6c75dc0d FI |
1001 | |
1002 | bytes += req->wb_bytes; | |
1003 | nfs_list_remove_request(req); | |
1004 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && | |
1005 | (hdr->good_bytes < bytes)) { | |
af887e43 | 1006 | trace_nfs_comp_error(hdr->inode, req, hdr->error); |
0c493b5c TM |
1007 | nfs_mapping_set_error(nfs_page_to_folio(req), |
1008 | hdr->error); | |
6c75dc0d FI |
1009 | goto remove_req; |
1010 | } | |
c65e6254 | 1011 | if (nfs_write_need_commit(hdr)) { |
33344e0f TM |
1012 | /* Reset wb_nio, since the write was successful. */ |
1013 | req->wb_nio = 0; | |
f79d06f5 | 1014 | memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); |
b57ff130 | 1015 | nfs_mark_request_commit(req, hdr->lseg, &cinfo, |
a7d42ddb | 1016 | hdr->pgio_mirror_idx); |
6c75dc0d FI |
1017 | goto next; |
1018 | } | |
1019 | remove_req: | |
1020 | nfs_inode_remove_request(req); | |
1021 | next: | |
0c493b5c | 1022 | nfs_page_end_writeback(req); |
3aff4ebb | 1023 | nfs_release_request(req); |
6c75dc0d FI |
1024 | } |
1025 | out: | |
919e3bd9 | 1026 | nfs_io_completion_put(hdr->io_completion); |
6c75dc0d | 1027 | hdr->release(hdr); |
8e821cad | 1028 | } |
1da177e4 | 1029 | |
ce59515c | 1030 | unsigned long |
ea2cf228 | 1031 | nfs_reqs_to_commit(struct nfs_commit_info *cinfo) |
fb8a1f11 | 1032 | { |
5cb953d4 | 1033 | return atomic_long_read(&cinfo->mds->ncommit); |
d6d6dc7c FI |
1034 | } |
1035 | ||
e824f99a | 1036 | /* NFS_I(cinfo->inode)->commit_mutex held by caller */ |
1763da12 | 1037 | int |
ea2cf228 FI |
1038 | nfs_scan_commit_list(struct list_head *src, struct list_head *dst, |
1039 | struct nfs_commit_info *cinfo, int max) | |
d6d6dc7c | 1040 | { |
137da553 | 1041 | struct nfs_page *req, *tmp; |
d6d6dc7c FI |
1042 | int ret = 0; |
1043 | ||
137da553 | 1044 | list_for_each_entry_safe(req, tmp, src, wb_list) { |
7ad84aa9 | 1045 | kref_get(&req->wb_kref); |
2ce209c4 | 1046 | if (!nfs_lock_request(req)) { |
2ce209c4 | 1047 | nfs_release_request(req); |
64a93dbf | 1048 | continue; |
2ce209c4 | 1049 | } |
ea2cf228 | 1050 | nfs_request_remove_commit_list(req, cinfo); |
5d2a9d9d | 1051 | clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); |
8dd37758 TM |
1052 | nfs_list_add_request(req, dst); |
1053 | ret++; | |
1763da12 | 1054 | if ((ret == max) && !cinfo->dreq) |
8dd37758 | 1055 | break; |
e824f99a | 1056 | cond_resched(); |
d6d6dc7c FI |
1057 | } |
1058 | return ret; | |
fb8a1f11 | 1059 | } |
5d2a9d9d | 1060 | EXPORT_SYMBOL_GPL(nfs_scan_commit_list); |
fb8a1f11 | 1061 | |
1da177e4 LT |
1062 | /* |
1063 | * nfs_scan_commit - Scan an inode for commit requests | |
1064 | * @inode: NFS inode to scan | |
ea2cf228 FI |
1065 | * @dst: mds destination list |
1066 | * @cinfo: mds and ds lists of reqs ready to commit | |
1da177e4 LT |
1067 | * |
1068 | * Moves requests from the inode's 'commit' request list. | |
1069 | * The requests are *not* checked to ensure that they form a contiguous set. | |
1070 | */ | |
1763da12 | 1071 | int |
ea2cf228 FI |
1072 | nfs_scan_commit(struct inode *inode, struct list_head *dst, |
1073 | struct nfs_commit_info *cinfo) | |
1da177e4 | 1074 | { |
d6d6dc7c | 1075 | int ret = 0; |
fb8a1f11 | 1076 | |
5cb953d4 TM |
1077 | if (!atomic_long_read(&cinfo->mds->ncommit)) |
1078 | return 0; | |
e824f99a | 1079 | mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); |
5cb953d4 | 1080 | if (atomic_long_read(&cinfo->mds->ncommit) > 0) { |
8dd37758 | 1081 | const int max = INT_MAX; |
d6d6dc7c | 1082 | |
ea2cf228 FI |
1083 | ret = nfs_scan_commit_list(&cinfo->mds->list, dst, |
1084 | cinfo, max); | |
1085 | ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); | |
d6d6dc7c | 1086 | } |
e824f99a | 1087 | mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); |
ff778d02 | 1088 | return ret; |
1da177e4 | 1089 | } |
d6d6dc7c | 1090 | |
1da177e4 | 1091 | /* |
e7d39069 TM |
1092 | * Search for an existing write request, and attempt to update |
1093 | * it to reflect a new dirty region on a given page. | |
1da177e4 | 1094 | * |
e7d39069 TM |
1095 | * If the attempt fails, then the existing request is flushed out |
1096 | * to disk. | |
1da177e4 | 1097 | */ |
0c493b5c TM |
1098 | static struct nfs_page *nfs_try_to_update_request(struct folio *folio, |
1099 | unsigned int offset, | |
1100 | unsigned int bytes) | |
1da177e4 | 1101 | { |
e7d39069 TM |
1102 | struct nfs_page *req; |
1103 | unsigned int rqend; | |
1104 | unsigned int end; | |
1105 | int error; | |
1106 | ||
1da177e4 LT |
1107 | end = offset + bytes; |
1108 | ||
0c493b5c | 1109 | req = nfs_lock_and_join_requests(folio); |
f6032f21 TM |
1110 | if (IS_ERR_OR_NULL(req)) |
1111 | return req; | |
1da177e4 | 1112 | |
f6032f21 TM |
1113 | rqend = req->wb_offset + req->wb_bytes; |
1114 | /* | |
1115 | * Tell the caller to flush out the request if | |
1116 | * the offsets are non-contiguous. | |
1117 | * Note: nfs_flush_incompatible() will already | |
1118 | * have flushed out requests having wrong owners. | |
1119 | */ | |
1120 | if (offset > rqend || end < req->wb_offset) | |
1121 | goto out_flushme; | |
1da177e4 LT |
1122 | |
1123 | /* Okay, the request matches. Update the region */ | |
1124 | if (offset < req->wb_offset) { | |
1125 | req->wb_offset = offset; | |
1126 | req->wb_pgbase = offset; | |
1da177e4 | 1127 | } |
1da177e4 LT |
1128 | if (end > rqend) |
1129 | req->wb_bytes = end - req->wb_offset; | |
e7d39069 TM |
1130 | else |
1131 | req->wb_bytes = rqend - req->wb_offset; | |
33344e0f | 1132 | req->wb_nio = 0; |
e7d39069 TM |
1133 | return req; |
1134 | out_flushme: | |
f6032f21 TM |
1135 | /* |
1136 | * Note: we mark the request dirty here because | |
1137 | * nfs_lock_and_join_requests() cannot preserve | |
1138 | * commit flags, so we have to replay the write. | |
1139 | */ | |
1140 | nfs_mark_request_dirty(req); | |
1141 | nfs_unlock_and_release_request(req); | |
0c493b5c | 1142 | error = nfs_wb_folio(folio_file_mapping(folio)->host, folio); |
f6032f21 | 1143 | return (error < 0) ? ERR_PTR(error) : NULL; |
e7d39069 TM |
1144 | } |
1145 | ||
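nfs_try_to_update_request() absorbs the new dirty region [offset, end) into an existing request [wb_offset, rqend) only when the two ranges touch or overlap; any gap takes the flush path instead. The extension arithmetic, checked in isolation (the matching wb_pgbase update is elided for brevity):

```c
#include <assert.h>
#include <stdbool.h>

struct req { unsigned int offset, bytes; };

/* Returns false when the ranges are non-contiguous (caller must
 * flush), true after widening the request, as in the code above. */
static bool try_update(struct req *r, unsigned int offset,
		       unsigned int bytes)
{
	unsigned int rqend = r->offset + r->bytes;
	unsigned int end = offset + bytes;

	if (offset > rqend || end < r->offset)
		return false;		/* gap: flush instead */

	if (offset < r->offset)
		r->offset = offset;	/* extend downwards */
	if (end > rqend)
		r->bytes = end - r->offset;
	else
		r->bytes = rqend - r->offset;
	return true;
}

int main(void)
{
	struct req r = { .offset = 100, .bytes = 50 };	/* [100,150) */

	assert(try_update(&r, 150, 30));	/* abuts: now [100,180) */
	assert(r.offset == 100 && r.bytes == 80);
	assert(!try_update(&r, 200, 10));	/* gap at [180,200) */
	return 0;
}
```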
1146 | /* | |
1147 | * Try to update an existing write request, or create one if there is none. | |
1148 | * | |
1149 | * Note: Should always be called with the Page Lock held to prevent races | |
1150 | * if we have to add a new request. Also assumes that the caller has | |
1151 | * already called nfs_flush_incompatible() if necessary. | |
1152 | */ | |
0c493b5c TM |
1153 | static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx, |
1154 | struct folio *folio, | |
1155 | unsigned int offset, | |
1156 | unsigned int bytes) | |
e7d39069 | 1157 | { |
0c493b5c | 1158 | struct nfs_page *req; |
1da177e4 | 1159 | |
0c493b5c | 1160 | req = nfs_try_to_update_request(folio, offset, bytes); |
e7d39069 TM |
1161 | if (req != NULL) |
1162 | goto out; | |
0c493b5c | 1163 | req = nfs_page_create_from_folio(ctx, folio, offset, bytes); |
e7d39069 TM |
1164 | if (IS_ERR(req)) |
1165 | goto out; | |
0c493b5c | 1166 | nfs_inode_add_request(req); |
efc91ed0 | 1167 | out: |
61e930a9 | 1168 | return req; |
1da177e4 LT |
1169 | } |
1170 | ||
0c493b5c TM |
1171 | static int nfs_writepage_setup(struct nfs_open_context *ctx, |
1172 | struct folio *folio, unsigned int offset, | |
1173 | unsigned int count) | |
e7d39069 | 1174 | { |
0c493b5c | 1175 | struct nfs_page *req; |
e7d39069 | 1176 | |
0c493b5c | 1177 | req = nfs_setup_write_request(ctx, folio, offset, count); |
e7d39069 TM |
1178 | if (IS_ERR(req)) |
1179 | return PTR_ERR(req); | |
1180 | /* Update file length */ | |
0c493b5c | 1181 | nfs_grow_file(folio, offset, count); |
d72ddcba | 1182 | nfs_mark_uptodate(req); |
a6305ddb | 1183 | nfs_mark_request_dirty(req); |
1d1afcbc | 1184 | nfs_unlock_and_release_request(req); |
e7d39069 TM |
1185 | return 0; |
1186 | } | |
1187 | ||
0c493b5c | 1188 | int nfs_flush_incompatible(struct file *file, struct folio *folio) |
1da177e4 | 1189 | { |
cd3758e3 | 1190 | struct nfs_open_context *ctx = nfs_file_open_context(file); |
2a369153 | 1191 | struct nfs_lock_context *l_ctx; |
17b985de | 1192 | struct file_lock_context *flctx = locks_inode_context(file_inode(file)); |
1da177e4 | 1193 | struct nfs_page *req; |
1a54533e | 1194 | int do_flush, status; |
1da177e4 LT |
1195 | /* |
1196 | * Look for a request corresponding to this page. If there | |
1197 | * is one, and it belongs to another file, we flush it out | |
1198 | * before we try to copy anything into the page. Do this | |
1199 | * due to the lack of an ACCESS-type call in NFSv2. | |
1200 | * Also do the same if we find a request from an existing | |
1201 | * dropped page. | |
1202 | */ | |
1a54533e | 1203 | do { |
0c493b5c | 1204 | req = nfs_folio_find_head_request(folio); |
1a54533e TM |
1205 | if (req == NULL) |
1206 | return 0; | |
2a369153 | 1207 | l_ctx = req->wb_lock_context; |
0c493b5c TM |
1208 | do_flush = nfs_page_to_folio(req) != folio || |
1209 | !nfs_match_open_context(nfs_req_openctx(req), ctx); | |
bd61e0a9 JL |
1210 | if (l_ctx && flctx && |
1211 | !(list_empty_careful(&flctx->flc_posix) && | |
1212 | list_empty_careful(&flctx->flc_flock))) { | |
d51fdb87 | 1213 | do_flush |= l_ctx->lockowner != current->files; |
5263e31e | 1214 | } |
1da177e4 | 1215 | nfs_release_request(req); |
1a54533e TM |
1216 | if (!do_flush) |
1217 | return 0; | |
0c493b5c | 1218 | status = nfs_wb_folio(folio_file_mapping(folio)->host, folio); |
1a54533e TM |
1219 | } while (status == 0); |
1220 | return status; | |
1da177e4 LT |
1221 | } |
1222 | ||
dc24826b AA |
1223 | /* |
1224 | * Avoid buffered writes when an open context credential's key would |
1225 | * expire soon. | |
1226 | * | |
1227 | * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL. | |
1228 | * | |
1229 | * Return 0 and set a credential flag which triggers the inode to flush | |
1230 | * and performs NFS_FILE_SYNC writes if the key will expire within |
1231 | * RPC_KEY_EXPIRE_TIMEO. | |
1232 | */ | |
1233 | int | |
1234 | nfs_key_timeout_notify(struct file *filp, struct inode *inode) | |
1235 | { | |
1236 | struct nfs_open_context *ctx = nfs_file_open_context(filp); | |
dc24826b | 1237 | |
ddf529ee | 1238 | if (nfs_ctx_key_to_expire(ctx, inode) && |
ca05cbae | 1239 | !rcu_access_pointer(ctx->ll_cred)) |
ddf529ee N |
1240 | /* Already expired! */ |
1241 | return -EACCES; | |
1242 | return 0; | |
dc24826b AA |
1243 | } |
1244 | ||
1245 | /* | |
1246 | * Test if the open context credential key is marked to expire soon. | |
1247 | */ | |
ce52914e | 1248 | bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode) |
dc24826b | 1249 | { |
ce52914e | 1250 | struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; |
ca05cbae | 1251 | struct rpc_cred *cred, *new, *old = NULL; |
ddf529ee | 1252 | struct auth_cred acred = { |
a52458b4 | 1253 | .cred = ctx->cred, |
ddf529ee | 1254 | }; |
ca05cbae | 1255 | bool ret = false; |
ddf529ee | 1256 | |
ca05cbae TM |
1257 | rcu_read_lock(); |
1258 | cred = rcu_dereference(ctx->ll_cred); | |
1259 | if (cred && !(cred->cr_ops->crkey_timeout && | |
1260 | cred->cr_ops->crkey_timeout(cred))) | |
1261 | goto out; | |
1262 | rcu_read_unlock(); | |
1263 | ||
1264 | new = auth->au_ops->lookup_cred(auth, &acred, 0); | |
1265 | if (new == cred) { | |
1266 | put_rpccred(new); | |
ddf529ee | 1267 | return true; |
ca05cbae TM |
1268 | } |
1269 | if (IS_ERR_OR_NULL(new)) { | |
1270 | new = NULL; | |
1271 | ret = true; | |
1272 | } else if (new->cr_ops->crkey_timeout && | |
1273 | new->cr_ops->crkey_timeout(new)) | |
1274 | ret = true; | |
1275 | ||
1276 | rcu_read_lock(); | |
1277 | old = rcu_dereference_protected(xchg(&ctx->ll_cred, | |
1278 | RCU_INITIALIZER(new)), 1); | |
1279 | out: | |
1280 | rcu_read_unlock(); | |
1281 | put_rpccred(old); | |
1282 | return ret; | |
dc24826b AA |
1283 | } |
1284 | ||
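nfs_ctx_key_to_expire() caches the looked-up credential in ctx->ll_cred: the new cred is published with xchg() and the displaced one is released once the RCU read side can no longer see it. A simplified userspace sketch of the publish-and-retire step using C11 atomics; it deliberately drops the RCU grace-period deferral, so unlike the kernel code it is only safe when no concurrent reader still holds the old pointer:

```c
#include <stdatomic.h>
#include <stdlib.h>

struct cred { atomic_int refs; };

static void cred_put(struct cred *c)
{
	if (c && atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);
}

/* Swap a freshly looked-up credential into the cache slot and drop
 * the reference on whatever was there before; atomic_exchange plays
 * the role xchg() plays on ctx->ll_cred above. */
static void cache_cred(_Atomic(struct cred *) *slot, struct cred *fresh)
{
	struct cred *old = atomic_exchange(slot, fresh);

	cred_put(old);
}

int main(void)
{
	_Atomic(struct cred *) ll_cred = NULL;
	struct cred *a = calloc(1, sizeof(*a));
	struct cred *b = calloc(1, sizeof(*b));

	atomic_init(&a->refs, 1);
	atomic_init(&b->refs, 1);
	cache_cred(&ll_cred, a);	/* publish a            */
	cache_cred(&ll_cred, b);	/* retire a, publish b  */
	cred_put(atomic_exchange(&ll_cred, NULL));	/* teardown */
	return 0;
}
```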
5d47a356 TM |
1285 | /* |
1286 | * If the page cache is marked as unsafe or invalid, then we can't rely on | |
1287 | * the PageUptodate() flag. In this case, we will need to turn off | |
1288 | * write optimisations that depend on the page contents being correct. | |
1289 | */ | |
0c493b5c | 1290 | static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen) |
5d47a356 | 1291 | { |
0c493b5c | 1292 | struct inode *inode = folio_file_mapping(folio)->host; |
d529ef83 JL |
1293 | struct nfs_inode *nfsi = NFS_I(inode); |
1294 | ||
8d197a56 TM |
1295 | if (nfs_have_delegated_attributes(inode)) |
1296 | goto out; | |
fc9dc401 | 1297 | if (nfsi->cache_validity & |
13c0b082 | 1298 | (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE)) |
d529ef83 | 1299 | return false; |
4db72b40 | 1300 | smp_rmb(); |
fc9dc401 | 1301 | if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0) |
8d197a56 TM |
1302 | return false; |
1303 | out: | |
fc9dc401 | 1304 | if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0) |
18dd78c4 | 1305 | return false; |
0c493b5c | 1306 | return folio_test_uptodate(folio) != 0; |
5d47a356 TM |
1307 | } |
1308 | ||
5263e31e JL |
1309 | static bool |
1310 | is_whole_file_wrlock(struct file_lock *fl) | |
1311 | { | |
1312 | return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && | |
1313 | fl->fl_type == F_WRLCK; | |
1314 | } | |
1315 | ||
c7559663 SM |
1316 | /* If we know the page is up to date, and we're not using byte range locks (or |
1317 | * if we have the whole file locked for writing), it may be more efficient to | |
1318 | * extend the write to cover the entire page in order to avoid fragmentation | |
1319 | * inefficiencies. | |
1320 | * | |
263b4509 SM |
1321 | * If the file is opened for synchronous writes then we can just skip the rest |
1322 | * of the checks. | |
c7559663 | 1323 | */ |
0c493b5c TM |
1324 | static int nfs_can_extend_write(struct file *file, struct folio *folio, |
1325 | unsigned int pagelen) | |
c7559663 | 1326 | { |
0c493b5c | 1327 | struct inode *inode = file_inode(file); |
17b985de | 1328 | struct file_lock_context *flctx = locks_inode_context(inode); |
5263e31e | 1329 | struct file_lock *fl; |
0c493b5c | 1330 | int ret; |
5263e31e | 1331 | |
c7559663 SM |
1332 | if (file->f_flags & O_DSYNC) |
1333 | return 0; | |
0c493b5c | 1334 | if (!nfs_folio_write_uptodate(folio, pagelen)) |
263b4509 | 1335 | return 0; |
c7559663 SM |
1336 | if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) |
1337 | return 1; | |
bd61e0a9 JL |
1338 | if (!flctx || (list_empty_careful(&flctx->flc_flock) && |
1339 | list_empty_careful(&flctx->flc_posix))) | |
8fa4592a | 1340 | return 1; |
5263e31e JL |
1341 | |
1342 | /* Check to see if there are whole file write locks */ | |
5263e31e | 1343 | ret = 0; |
6109c850 | 1344 | spin_lock(&flctx->flc_lock); |
bd61e0a9 JL |
1345 | if (!list_empty(&flctx->flc_posix)) { |
1346 | fl = list_first_entry(&flctx->flc_posix, struct file_lock, | |
1347 | fl_list); | |
1348 | if (is_whole_file_wrlock(fl)) | |
1349 | ret = 1; | |
1350 | } else if (!list_empty(&flctx->flc_flock)) { | |
5263e31e JL |
1351 | fl = list_first_entry(&flctx->flc_flock, struct file_lock, |
1352 | fl_list); | |
1353 | if (fl->fl_type == F_WRLCK) | |
1354 | ret = 1; | |
1355 | } | |
6109c850 | 1356 | spin_unlock(&flctx->flc_lock); |
5263e31e | 1357 | return ret; |
c7559663 SM |
1358 | } |
1359 | ||
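is_whole_file_wrlock() treats a POSIX write lock as whole-file when it spans byte 0 through OFFSET_MAX, which is how the kernel records a userspace F_WRLCK with l_start 0 and l_len 0 ("through end of file"). The same predicate expressed over the userspace struct flock that such locks originate from:

```c
#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

/* A userspace F_WRLCK from byte 0 with l_len == 0 locks the whole
 * file; the kernel stores that as fl_start == 0, fl_end == OFFSET_MAX. */
static bool is_whole_file_wrlock(const struct flock *fl)
{
	return fl->l_type == F_WRLCK &&
	       fl->l_whence == SEEK_SET &&
	       fl->l_start == 0 &&
	       fl->l_len == 0;
}

int main(void)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,	/* 0 means "through EOF" */
	};

	return is_whole_file_wrlock(&fl) ? 0 : 1;
}
```

Holding such a lock rules out competing byte-range lockers, which is why nfs_can_extend_write() is willing to widen the write to the whole page in that case.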
1da177e4 LT |
1360 | /* |
1361 | * Update and possibly write a cached page of an NFS file. | |
1362 | * | |
1363 | * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad | |
1364 | * things with a page scheduled for an RPC call (e.g. invalidate it). | |
1365 | */ | |
0c493b5c TM |
1366 | int nfs_update_folio(struct file *file, struct folio *folio, |
1367 | unsigned int offset, unsigned int count) | |
1da177e4 | 1368 | { |
cd3758e3 | 1369 | struct nfs_open_context *ctx = nfs_file_open_context(file); |
0c493b5c TM |
1370 | struct address_space *mapping = folio_file_mapping(folio); |
1371 | struct inode *inode = mapping->host; | |
1372 | unsigned int pagelen = nfs_folio_length(folio); | |
1da177e4 LT |
1373 | int status = 0; |
1374 | ||
91d5b470 CL |
1375 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); |
1376 | ||
0c493b5c TM |
1377 | dprintk("NFS: nfs_update_folio(%pD2 %d@%lld)\n", file, count, |
1378 | (long long)(folio_file_pos(folio) + offset)); | |
1da177e4 | 1379 | |
149a4fdd BC |
1380 | if (!count) |
1381 | goto out; | |
1382 | ||
0c493b5c | 1383 | if (nfs_can_extend_write(file, folio, pagelen)) { |
fc9dc401 | 1384 | count = max(count + offset, pagelen); |
1da177e4 | 1385 | offset = 0; |
1da177e4 LT |
1386 | } |
1387 | ||
0c493b5c | 1388 | status = nfs_writepage_setup(ctx, folio, offset, count); |
03fa9e84 | 1389 | if (status < 0) |
d2ceb7e5 | 1390 | nfs_set_pageerror(mapping); |
149a4fdd | 1391 | out: |
0c493b5c | 1392 | dprintk("NFS: nfs_update_folio returns %d (isize %lld)\n", |
1da177e4 | 1393 | status, (long long)i_size_read(inode)); |
1da177e4 LT |
1394 | return status; |
1395 | } | |
1396 | ||
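/*
 * Map the FLUSH_HIGHPRI/FLUSH_LOWPRI flags in the "how" argument
 * onto an RPC scheduling priority; anything else runs at normal
 * priority.
 */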
3ff7576d | 1397 | static int flush_task_priority(int how) |
1da177e4 LT |
1398 | { |
1399 | switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { | |
1400 | case FLUSH_HIGHPRI: | |
1401 | return RPC_PRIORITY_HIGH; | |
1402 | case FLUSH_LOWPRI: | |
1403 | return RPC_PRIORITY_LOW; | |
1404 | } | |
1405 | return RPC_PRIORITY_NORMAL; | |
1406 | } | |
1407 | ||
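/*
 * Fill in the remaining WRITE rpc_task_setup details: the task
 * priority derived from the flush flags, RPC_TASK_SWAPPER when the
 * target is a swapfile, and the protocol-specific setup of the RPC
 * message via ->write_setup().
 */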
d45f60c6 WAA |
1408 | static void nfs_initiate_write(struct nfs_pgio_header *hdr, |
1409 | struct rpc_message *msg, | |
abde71f4 | 1410 | const struct nfs_rpc_ops *rpc_ops, |
1ed26f33 | 1411 | struct rpc_task_setup *task_setup_data, int how) |
1da177e4 | 1412 | { |
3ff7576d | 1413 | int priority = flush_task_priority(how); |
d138d5d1 | 1414 | |
8db55a03 N |
1415 | if (IS_SWAPFILE(hdr->inode)) |
1416 | task_setup_data->flags |= RPC_TASK_SWAPPER; | |
1ed26f33 | 1417 | task_setup_data->priority = priority; |
fb91fb0e | 1418 | rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); |
5bb2a7cb | 1419 | trace_nfs_initiate_write(hdr); |
275acaaf TM |
1420 | } |
1421 | ||
6d884e8f F |
1422 | /* If an nfs_flush_* function fails, it should remove reqs from @head and |
1423 | * call this on each, which will prepare them to be retried on the next |
1424 | * writeback using standard NFS. |
1425 | */ | |
1426 | static void nfs_redirty_request(struct nfs_page *req) | |
1427 | { | |
6dd85e83 | 1428 | struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req)); |
67f4b5dc | 1429 | |
33344e0f TM |
1430 | /* Bump the transmission count */ |
1431 | req->wb_nio++; | |
6d884e8f | 1432 | nfs_mark_request_dirty(req); |
67f4b5dc | 1433 | atomic_long_inc(&nfsi->redirtied_pages); |
0c493b5c | 1434 | nfs_page_end_writeback(req); |
3aff4ebb | 1435 | nfs_release_request(req); |
6d884e8f F |
1436 | } |
1437 | ||
df3accb8 | 1438 | static void nfs_async_write_error(struct list_head *head, int error) |
6c75dc0d FI |
1439 | { |
1440 | struct nfs_page *req; | |
1441 | ||
1442 | while (!list_empty(head)) { | |
1443 | req = nfs_list_entry(head->next); | |
1444 | nfs_list_remove_request(req); | |
cea9ba72 | 1445 | if (nfs_error_is_fatal_on_server(error)) |
6fbda89b TM |
1446 | nfs_write_error(req, error); |
1447 | else | |
1448 | nfs_redirty_request(req); | |
6c75dc0d FI |
1449 | } |
1450 | } | |
1451 | ||
dc602dd7 TM |
1452 | static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) |
1453 | { | |
df3accb8 | 1454 | nfs_async_write_error(&hdr->pages, 0); |
dc602dd7 TM |
1455 | } |
1456 | ||
061ae2ed | 1457 | static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { |
919e3bd9 | 1458 | .init_hdr = nfs_async_write_init, |
061ae2ed FI |
1459 | .error_cleanup = nfs_async_write_error, |
1460 | .completion = nfs_write_completion, | |
dc602dd7 | 1461 | .reschedule_io = nfs_async_write_reschedule_io, |
061ae2ed FI |
1462 | }; |
1463 | ||
57208fa7 | 1464 | void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, |
a20c93e3 | 1465 | struct inode *inode, int ioflags, bool force_mds, |
061ae2ed | 1466 | const struct nfs_pgio_completion_ops *compl_ops) |
1da177e4 | 1467 | { |
a20c93e3 | 1468 | struct nfs_server *server = NFS_SERVER(inode); |
41d8d5b7 | 1469 | const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops; |
a20c93e3 CH |
1470 | |
1471 | #ifdef CONFIG_NFS_V4_1 | |
1472 | if (server->pnfs_curr_ld && !force_mds) | |
1473 | pg_ops = server->pnfs_curr_ld->pg_write_ops; | |
1474 | #endif | |
4a0de55c | 1475 | nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops, |
3bde7afd | 1476 | server->wsize, ioflags); |
1751c363 | 1477 | } |
ddda8e0a | 1478 | EXPORT_SYMBOL_GPL(nfs_pageio_init_write); |
1da177e4 | 1479 | |
dce81290 TM |
1480 | void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) |
1481 | { | |
a7d42ddb WAA |
1482 | struct nfs_pgio_mirror *mirror; |
1483 | ||
6f29b9bb KM |
1484 | if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) |
1485 | pgio->pg_ops->pg_cleanup(pgio); | |
1486 | ||
41d8d5b7 | 1487 | pgio->pg_ops = &nfs_pgio_rw_ops; |
a7d42ddb WAA |
1488 | |
1489 | nfs_pageio_stop_mirroring(pgio); | |
1490 | ||
1491 | mirror = &pgio->pg_mirrors[0]; | |
1492 | mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; | |
dce81290 | 1493 | } |
1f945357 | 1494 | EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); |
dce81290 | 1495 | |
1da177e4 | 1496 | |
0b7c0153 FI |
1497 | void nfs_commit_prepare(struct rpc_task *task, void *calldata) |
1498 | { | |
1499 | struct nfs_commit_data *data = calldata; | |
1500 | ||
1501 | NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); | |
1502 | } | |
1503 | ||
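/*
 * If this WRITE may have extended the file, make sure the post-op
 * attributes carry a size covering the end of the write, and never
 * let a stale (smaller) server size truncate our cached view of
 * the file.
 */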
a08a8cd3 TM |
1504 | static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, |
1505 | struct nfs_fattr *fattr) | |
1506 | { | |
1507 | struct nfs_pgio_args *argp = &hdr->args; | |
1508 | struct nfs_pgio_res *resp = &hdr->res; | |
2b83d3de | 1509 | u64 size = argp->offset + resp->count; |
a08a8cd3 TM |
1510 | |
1511 | if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) | |
2b83d3de TM |
1512 | fattr->size = size; |
1513 | if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { | |
1514 | fattr->valid &= ~NFS_ATTR_FATTR_SIZE; | |
a08a8cd3 | 1515 | return; |
2b83d3de TM |
1516 | } |
1517 | if (size != fattr->size) | |
a08a8cd3 TM |
1518 | return; |
1519 | /* Set attribute barrier */ | |
1520 | nfs_fattr_set_barrier(fattr); | |
2b83d3de TM |
1521 | /* ...and update size */ |
1522 | fattr->valid |= NFS_ATTR_FATTR_SIZE; | |
a08a8cd3 TM |
1523 | } |
1524 | ||
1525 | void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) | |
1526 | { | |
2b83d3de | 1527 | struct nfs_fattr *fattr = &hdr->fattr; |
a08a8cd3 TM |
1528 | struct inode *inode = hdr->inode; |
1529 | ||
a08a8cd3 TM |
1530 | spin_lock(&inode->i_lock); |
1531 | nfs_writeback_check_extend(hdr, fattr); | |
1532 | nfs_post_op_update_inode_force_wcc_locked(inode, fattr); | |
1533 | spin_unlock(&inode->i_lock); | |
1534 | } | |
1535 | EXPORT_SYMBOL_GPL(nfs_writeback_update_inode); | |
1536 | ||
1da177e4 LT |
1537 | /* |
1538 | * This function is called when the WRITE call is complete. | |
1539 | */ | |
d45f60c6 WAA |
1540 | static int nfs_writeback_done(struct rpc_task *task, |
1541 | struct nfs_pgio_header *hdr, | |
0eecb214 | 1542 | struct inode *inode) |
1da177e4 | 1543 | { |
788e7a89 | 1544 | int status; |
1da177e4 | 1545 | |
f551e44f CL |
1546 | /* |
1547 | * ->write_done will attempt to use post-op attributes to detect | |
1548 | * conflicting writes by other clients. A strict interpretation | |
1549 | * of close-to-open would allow us to continue caching even if | |
1550 | * another writer had changed the file, but some applications | |
1551 | * depend on tighter cache coherency when writing. | |
1552 | */ | |
d45f60c6 | 1553 | status = NFS_PROTO(inode)->write_done(task, hdr); |
788e7a89 | 1554 | if (status != 0) |
0eecb214 | 1555 | return status; |
8224b273 | 1556 | |
d45f60c6 | 1557 | nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); |
5bb2a7cb | 1558 | trace_nfs_writeback_done(task, hdr); |
91d5b470 | 1559 | |
69d96651 JL |
1560 | if (task->tk_status >= 0) { |
1561 | enum nfs3_stable_how committed = hdr->res.verf->committed; | |
1562 | ||
1563 | if (committed == NFS_UNSTABLE) { | |
1564 | /* | |
1565 | * We have some uncommitted data on the server at | |
1566 | * this point, so ensure that we keep track of that | |
1567 | * fact irrespective of what later writes do. | |
1568 | */ | |
1569 | set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags); | |
1570 | } | |
1da177e4 | 1571 | |
69d96651 JL |
1572 | if (committed < hdr->args.stable) { |
1573 | /* We tried a write call, but the server did not | |
1574 | * commit data to stable storage even though we | |
1575 | * requested it. | |
1576 | * Note: There is a known bug in Tru64 < 5.0 in which | |
1577 | * the server reports NFS_DATA_SYNC, but performs | |
1578 | * NFS_FILE_SYNC. We therefore implement this checking | |
1579 | * as a dprintk() in order to avoid filling syslog. | |
1580 | */ | |
1581 | static unsigned long complain; | |
1582 | ||
1583 | /* Note this will print the MDS for a DS write */ | |
1584 | if (time_before(complain, jiffies)) { | |
1585 | dprintk("NFS: faulty NFS server %s:" | |
1586 | " (committed = %d) != (stable = %d)\n", | |
1587 | NFS_SERVER(inode)->nfs_client->cl_hostname, | |
1588 | committed, hdr->args.stable); | |
1589 | complain = jiffies + 300 * HZ; | |
1590 | } | |
1da177e4 LT |
1591 | } |
1592 | } | |
1f2edbe3 TM |
1593 | |
1594 | /* Deal with the suid/sgid bit corner case */ | |
16e14375 TM |
1595 | if (nfs_should_remove_suid(inode)) { |
1596 | spin_lock(&inode->i_lock); | |
720869eb | 1597 | nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); |
16e14375 TM |
1598 | spin_unlock(&inode->i_lock); |
1599 | } | |
0eecb214 AS |
1600 | return 0; |
1601 | } | |
1602 | ||
1603 | /* | |
1604 | * Check the result of a completed WRITE call and resend any short write. |
1605 | */ | |
d45f60c6 WAA |
1606 | static void nfs_writeback_result(struct rpc_task *task, |
1607 | struct nfs_pgio_header *hdr) | |
0eecb214 | 1608 | { |
d45f60c6 WAA |
1609 | struct nfs_pgio_args *argp = &hdr->args; |
1610 | struct nfs_pgio_res *resp = &hdr->res; | |
1f2edbe3 TM |
1611 | |
1612 | if (resp->count < argp->count) { | |
1da177e4 LT |
1613 | static unsigned long complain; |
1614 | ||
6c75dc0d | 1615 | /* This is a short write! */ |
d45f60c6 | 1616 | nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE); |
91d5b470 | 1617 | |
1da177e4 | 1618 | /* Has the server at least made some progress? */ |
6c75dc0d FI |
1619 | if (resp->count == 0) { |
1620 | if (time_before(complain, jiffies)) { | |
1621 | printk(KERN_WARNING | |
1622 | "NFS: Server wrote zero bytes, expected %u.\n", | |
1623 | argp->count); | |
1624 | complain = jiffies + 300 * HZ; | |
1da177e4 | 1625 | } |
d45f60c6 | 1626 | nfs_set_pgio_error(hdr, -EIO, argp->offset); |
6c75dc0d | 1627 | task->tk_status = -EIO; |
13602896 | 1628 | return; |
1da177e4 | 1629 | } |
f8417b48 KM |
1630 | |
1631 | /* For non-RPC-based layout drivers, retry through the MDS */ |
1632 | if (!task->tk_ops) { | |
1633 | hdr->pnfs_error = -EAGAIN; | |
1634 | return; | |
1635 | } | |
1636 | ||
6c75dc0d FI |
1637 | /* Was this an NFSv2 write or an NFSv3 stable write? */ |
1638 | if (resp->verf->committed != NFS_UNSTABLE) { | |
1639 | /* Resend from where the server left off */ | |
d45f60c6 | 1640 | hdr->mds_offset += resp->count; |
6c75dc0d FI |
1641 | argp->offset += resp->count; |
1642 | argp->pgbase += resp->count; | |
1643 | argp->count -= resp->count; | |
1644 | } else { | |
1645 | /* Resend as a stable write in order to avoid | |
1646 | * headaches in the case of a server crash. | |
1647 | */ | |
1648 | argp->stable = NFS_FILE_SYNC; | |
1da177e4 | 1649 | } |
8c9cb714 TM |
1650 | resp->count = 0; |
1651 | resp->verf->committed = 0; | |
6c75dc0d | 1652 | rpc_restart_call_prepare(task); |
1da177e4 | 1653 | } |
1da177e4 LT |
1654 | } |
1655 | ||
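/*
 * cinfo->rpcs_out counts commit RPCs (and local users) in flight:
 * nfs_commit_begin() takes a reference, and nfs_commit_end() drops
 * it, waking anyone blocked in wait_on_commit() once the count
 * reaches zero.
 */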
af7cf057 | 1656 | static int wait_on_commit(struct nfs_mds_commit_info *cinfo) |
71d0a611 | 1657 | { |
723c921e PZ |
1658 | return wait_var_event_killable(&cinfo->rpcs_out, |
1659 | !atomic_read(&cinfo->rpcs_out)); | |
af7cf057 | 1660 | } |
b8413f98 | 1661 | |
af7cf057 TM |
1662 | static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) |
1663 | { | |
1664 | atomic_inc(&cinfo->rpcs_out); | |
71d0a611 TM |
1665 | } |
1666 | ||
133a48ab | 1667 | bool nfs_commit_end(struct nfs_mds_commit_info *cinfo) |
71d0a611 | 1668 | { |
133a48ab | 1669 | if (atomic_dec_and_test(&cinfo->rpcs_out)) { |
723c921e | 1670 | wake_up_var(&cinfo->rpcs_out); |
133a48ab TM |
1671 | return true; |
1672 | } | |
1673 | return false; | |
71d0a611 TM |
1674 | } |
1675 | ||
0b7c0153 | 1676 | void nfs_commitdata_release(struct nfs_commit_data *data) |
1da177e4 | 1677 | { |
0b7c0153 FI |
1678 | put_nfs_open_context(data->context); |
1679 | nfs_commit_free(data); | |
1da177e4 | 1680 | } |
e0c2b380 | 1681 | EXPORT_SYMBOL_GPL(nfs_commitdata_release); |
1da177e4 | 1682 | |
0b7c0153 | 1683 | int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, |
c36aae9a | 1684 | const struct nfs_rpc_ops *nfs_ops, |
9ace33cd | 1685 | const struct rpc_call_ops *call_ops, |
9f0ec176 | 1686 | int how, int flags) |
1da177e4 | 1687 | { |
07737691 | 1688 | struct rpc_task *task; |
9ace33cd | 1689 | int priority = flush_task_priority(how); |
bdc7f021 TM |
1690 | struct rpc_message msg = { |
1691 | .rpc_argp = &data->args, | |
1692 | .rpc_resp = &data->res, | |
9ace33cd | 1693 | .rpc_cred = data->cred, |
bdc7f021 | 1694 | }; |
84115e1c | 1695 | struct rpc_task_setup task_setup_data = { |
07737691 | 1696 | .task = &data->task, |
9ace33cd | 1697 | .rpc_client = clnt, |
bdc7f021 | 1698 | .rpc_message = &msg, |
9ace33cd | 1699 | .callback_ops = call_ops, |
84115e1c | 1700 | .callback_data = data, |
101070ca | 1701 | .workqueue = nfsiod_workqueue, |
4fa7ef69 | 1702 | .flags = RPC_TASK_ASYNC | flags, |
3ff7576d | 1703 | .priority = priority, |
84115e1c | 1704 | }; |
118f09ed OK |
1705 | |
1706 | if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE)) | |
1707 | task_setup_data.flags |= RPC_TASK_MOVEABLE; | |
1708 | ||
9ace33cd | 1709 | /* Set up the initial task struct. */ |
e9ae1ee2 | 1710 | nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client); |
8224b273 | 1711 | trace_nfs_initiate_commit(data); |
9ace33cd | 1712 | |
b4839ebe | 1713 | dprintk("NFS: initiated commit call\n"); |
9ace33cd FI |
1714 | |
1715 | task = rpc_run_task(&task_setup_data); | |
1716 | if (IS_ERR(task)) | |
1717 | return PTR_ERR(task); | |
1718 | if (how & FLUSH_SYNC) | |
1719 | rpc_wait_for_completion_task(task); | |
1720 | rpc_put_task(task); | |
1721 | return 0; | |
1722 | } | |
e0c2b380 | 1723 | EXPORT_SYMBOL_GPL(nfs_initiate_commit); |
9ace33cd | 1724 | |
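/* Return the offset of the last written byte covered by @head. */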
378520b8 PT |
1725 | static loff_t nfs_get_lwb(struct list_head *head) |
1726 | { | |
1727 | loff_t lwb = 0; | |
1728 | struct nfs_page *req; | |
1729 | ||
1730 | list_for_each_entry(req, head, wb_list) | |
1731 | if (lwb < (req_offset(req) + req->wb_bytes)) | |
1732 | lwb = req_offset(req) + req->wb_bytes; | |
1733 | ||
1734 | return lwb; | |
1735 | } | |
1736 | ||
9ace33cd FI |
1737 | /* |
1738 | * Set up the argument/result storage required for the RPC call. | |
1739 | */ | |
0b7c0153 | 1740 | void nfs_init_commit(struct nfs_commit_data *data, |
f453a54a FI |
1741 | struct list_head *head, |
1742 | struct pnfs_layout_segment *lseg, | |
1743 | struct nfs_commit_info *cinfo) | |
9ace33cd | 1744 | { |
19573c93 TM |
1745 | struct nfs_page *first; |
1746 | struct nfs_open_context *ctx; | |
1747 | struct inode *inode; | |
1da177e4 LT |
1748 | |
1749 | /* Set up the RPC argument and reply structs | |
1750 | * NB: take care not to mess about with data->commit et al. */ | |
1751 | ||
19573c93 TM |
1752 | if (head) |
1753 | list_splice_init(head, &data->pages); | |
1754 | ||
1755 | first = nfs_list_entry(data->pages.next); | |
1756 | ctx = nfs_req_openctx(first); | |
1757 | inode = d_inode(ctx->dentry); | |
1da177e4 | 1758 | |
1da177e4 | 1759 | data->inode = inode; |
9fcd5960 | 1760 | data->cred = ctx->cred; |
988b6dce | 1761 | data->lseg = lseg; /* reference transferred */ |
378520b8 PT |
1762 | /* only set lwb for pnfs commit */ |
1763 | if (lseg) | |
1764 | data->lwb = nfs_get_lwb(&data->pages); | |
9ace33cd | 1765 | data->mds_ops = &nfs_commit_ops; |
f453a54a | 1766 | data->completion_ops = cinfo->completion_ops; |
b359f9d0 | 1767 | data->dreq = cinfo->dreq; |
1da177e4 LT |
1768 | |
1769 | data->args.fh = NFS_FH(data->inode); | |
3da28eb1 TM |
1770 | /* Note: we always request a commit of the entire inode */ |
1771 | data->args.offset = 0; | |
1772 | data->args.count = 0; | |
9fcd5960 | 1773 | data->context = get_nfs_open_context(ctx); |
1da177e4 LT |
1774 | data->res.fattr = &data->fattr; |
1775 | data->res.verf = &data->verf; | |
0e574af1 | 1776 | nfs_fattr_init(&data->fattr); |
133a48ab | 1777 | nfs_commit_begin(cinfo->mds); |
1da177e4 | 1778 | } |
e0c2b380 | 1779 | EXPORT_SYMBOL_GPL(nfs_init_commit); |
1da177e4 | 1780 | |
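/*
 * Put the requests on @page_list back on the commit list so that a
 * later commit can pick them up again.
 */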
e0c2b380 | 1781 | void nfs_retry_commit(struct list_head *page_list, |
ea2cf228 | 1782 | struct pnfs_layout_segment *lseg, |
b57ff130 WAA |
1783 | struct nfs_commit_info *cinfo, |
1784 | u32 ds_commit_idx) | |
64bfeb49 FI |
1785 | { |
1786 | struct nfs_page *req; | |
1787 | ||
1788 | while (!list_empty(page_list)) { | |
1789 | req = nfs_list_entry(page_list->next); | |
1790 | nfs_list_remove_request(req); | |
b57ff130 | 1791 | nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); |
0c493b5c | 1792 | nfs_folio_clear_commit(nfs_page_to_folio(req)); |
1d1afcbc | 1793 | nfs_unlock_and_release_request(req); |
64bfeb49 FI |
1794 | } |
1795 | } | |
e0c2b380 | 1796 | EXPORT_SYMBOL_GPL(nfs_retry_commit); |
64bfeb49 | 1797 | |
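/* Redirty the request's folio so that a future writeback retries it. */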
0c493b5c TM |
1798 | static void nfs_commit_resched_write(struct nfs_commit_info *cinfo, |
1799 | struct nfs_page *req) | |
b20135d0 | 1800 | { |
0c493b5c TM |
1801 | struct folio *folio = nfs_page_to_folio(req); |
1802 | ||
1803 | filemap_dirty_folio(folio_mapping(folio), folio); | |
b20135d0 TM |
1804 | } |
1805 | ||
1da177e4 LT |
1806 | /* |
1807 | * Commit dirty pages | |
1808 | */ | |
1809 | static int | |
ea2cf228 FI |
1810 | nfs_commit_list(struct inode *inode, struct list_head *head, int how, |
1811 | struct nfs_commit_info *cinfo) | |
1da177e4 | 1812 | { |
0b7c0153 | 1813 | struct nfs_commit_data *data; |
85e39fee | 1814 | unsigned short task_flags = 0; |
1da177e4 | 1815 | |
ade8febd WAA |
1816 | /* another commit raced with us */ |
1817 | if (list_empty(head)) | |
1818 | return 0; | |
1819 | ||
515dcdcd TM |
1820 | data = nfs_commitdata_alloc(); |
1821 | if (!data) { | |
1822 | nfs_retry_commit(head, NULL, cinfo, -1); | |
1823 | return -ENOMEM; | |
1824 | } | |
1da177e4 LT |
1825 | |
1826 | /* Set up the argument struct */ | |
f453a54a | 1827 | nfs_init_commit(data, head, NULL, cinfo); |
85e39fee OK |
1828 | if (NFS_SERVER(inode)->nfs_client->cl_minorversion) |
1829 | task_flags = RPC_TASK_MOVEABLE; | |
c36aae9a | 1830 | return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), |
85e39fee OK |
1831 | data->mds_ops, how, |
1832 | RPC_TASK_CRED_NOREF | task_flags); | |
67911c8f | 1833 | } |
67911c8f | 1834 | |
1da177e4 LT |
1835 | /* |
1836 | * COMMIT call returned | |
1837 | */ | |
788e7a89 | 1838 | static void nfs_commit_done(struct rpc_task *task, void *calldata) |
1da177e4 | 1839 | { |
0b7c0153 | 1840 | struct nfs_commit_data *data = calldata; |
1da177e4 | 1841 | |
788e7a89 | 1842 | /* Call the NFS version-specific code */ |
c0d0e96b | 1843 | NFS_PROTO(data->inode)->commit_done(task, data); |
7bdd297e | 1844 | trace_nfs_commit_done(task, data); |
c9d8f89d TM |
1845 | } |
1846 | ||
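/*
 * Process the requests attached to a completed COMMIT: requests whose
 * write verifier matches the server's reply are done and can be
 * removed; anything else is redirtied and written again.
 */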
f453a54a | 1847 | static void nfs_commit_release_pages(struct nfs_commit_data *data) |
c9d8f89d | 1848 | { |
221203ce | 1849 | const struct nfs_writeverf *verf = data->res.verf; |
5917ce84 | 1850 | struct nfs_page *req; |
c9d8f89d | 1851 | int status = data->task.tk_status; |
f453a54a | 1852 | struct nfs_commit_info cinfo; |
353db796 | 1853 | struct nfs_server *nfss; |
0c493b5c | 1854 | struct folio *folio; |
788e7a89 | 1855 | |
1da177e4 LT |
1856 | while (!list_empty(&data->pages)) { |
1857 | req = nfs_list_entry(data->pages.next); | |
1858 | nfs_list_remove_request(req); | |
0c493b5c TM |
1859 | folio = nfs_page_to_folio(req); |
1860 | nfs_folio_clear_commit(folio); | |
1da177e4 | 1861 | |
1e8968c5 | 1862 | dprintk("NFS: commit (%s/%llu %d@%lld)", |
9fcd5960 TM |
1863 | nfs_req_openctx(req)->dentry->d_sb->s_id, |
1864 | (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)), | |
1da177e4 LT |
1865 | req->wb_bytes, |
1866 | (long long)req_offset(req)); | |
c9d8f89d | 1867 | if (status < 0) { |
0c493b5c | 1868 | if (folio) { |
af887e43 TM |
1869 | trace_nfs_commit_error(data->inode, req, |
1870 | status); | |
0c493b5c | 1871 | nfs_mapping_set_error(folio, status); |
38a33101 | 1872 | nfs_inode_remove_request(req); |
6fbda89b | 1873 | } |
ddeaa637 | 1874 | dprintk_cont(", error = %d\n", status); |
1da177e4 LT |
1875 | goto next; |
1876 | } | |
1877 | ||
1878 | /* Okay, COMMIT succeeded, apparently. Check the verifier | |
1879 | * returned by the server against all stored verfs. */ | |
1f28476d | 1880 | if (nfs_write_match_verf(verf, req)) { |
1da177e4 | 1881 | /* We have a match */ |
0c493b5c | 1882 | if (folio) |
38a33101 | 1883 | nfs_inode_remove_request(req); |
ddeaa637 | 1884 | dprintk_cont(" OK\n"); |
1da177e4 LT |
1885 | goto next; |
1886 | } | |
1887 | /* We have a mismatch. Write the page again */ | |
ddeaa637 | 1888 | dprintk_cont(" mismatch\n"); |
6d884e8f | 1889 | nfs_mark_request_dirty(req); |
67f4b5dc | 1890 | atomic_long_inc(&NFS_I(data->inode)->redirtied_pages); |
1da177e4 | 1891 | next: |
1d1afcbc | 1892 | nfs_unlock_and_release_request(req); |
7f1bda44 TM |
1893 | /* Latency breaker */ |
1894 | cond_resched(); | |
1da177e4 | 1895 | } |
353db796 N |
1896 | nfss = NFS_SERVER(data->inode); |
1897 | if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) | |
6df25e58 | 1898 | nfss->write_congested = 0; |
353db796 | 1899 | |
f453a54a | 1900 | nfs_init_cinfo(&cinfo, data->inode, data->dreq); |
af7cf057 | 1901 | nfs_commit_end(cinfo.mds); |
5917ce84 FI |
1902 | } |
1903 | ||
1904 | static void nfs_commit_release(void *calldata) | |
1905 | { | |
0b7c0153 | 1906 | struct nfs_commit_data *data = calldata; |
5917ce84 | 1907 | |
f453a54a | 1908 | data->completion_ops->completion(data); |
c9d8f89d | 1909 | nfs_commitdata_release(calldata); |
1da177e4 | 1910 | } |
788e7a89 TM |
1911 | |
1912 | static const struct rpc_call_ops nfs_commit_ops = { | |
0b7c0153 | 1913 | .rpc_call_prepare = nfs_commit_prepare, |
788e7a89 TM |
1914 | .rpc_call_done = nfs_commit_done, |
1915 | .rpc_release = nfs_commit_release, | |
1916 | }; | |
1da177e4 | 1917 | |
f453a54a FI |
1918 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops = { |
1919 | .completion = nfs_commit_release_pages, | |
b20135d0 | 1920 | .resched_write = nfs_commit_resched_write, |
f453a54a FI |
1921 | }; |
1922 | ||
1763da12 FI |
1923 | int nfs_generic_commit_list(struct inode *inode, struct list_head *head, |
1924 | int how, struct nfs_commit_info *cinfo) | |
84c53ab5 FI |
1925 | { |
1926 | int status; | |
1927 | ||
ea2cf228 | 1928 | status = pnfs_commit_list(inode, head, how, cinfo); |
84c53ab5 | 1929 | if (status == PNFS_NOT_ATTEMPTED) |
ea2cf228 | 1930 | status = nfs_commit_list(inode, head, how, cinfo); |
84c53ab5 FI |
1931 | return status; |
1932 | } | |
1933 | ||
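/*
 * Scan the inode for committable requests and send COMMITs until the
 * list is drained or an error occurs. Non-blocking (WB_SYNC_NONE)
 * callers get wbc->nr_to_write charged for the requests scanned;
 * FLUSH_SYNC callers additionally wait for the commits to complete.
 */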
c4f24df9 TM |
1934 | static int __nfs_commit_inode(struct inode *inode, int how, |
1935 | struct writeback_control *wbc) | |
1da177e4 | 1936 | { |
1da177e4 | 1937 | LIST_HEAD(head); |
ea2cf228 | 1938 | struct nfs_commit_info cinfo; |
71d0a611 | 1939 | int may_wait = how & FLUSH_SYNC; |
c4f24df9 | 1940 | int ret, nscan; |
1da177e4 | 1941 | |
64a93dbf | 1942 | how &= ~FLUSH_SYNC; |
ea2cf228 | 1943 | nfs_init_cinfo_from_inode(&cinfo, inode); |
af7cf057 | 1944 | nfs_commit_begin(cinfo.mds); |
c4f24df9 TM |
1945 | for (;;) { |
1946 | ret = nscan = nfs_scan_commit(inode, &head, &cinfo); | |
1947 | if (ret <= 0) | |
1948 | break; | |
1949 | ret = nfs_generic_commit_list(inode, &head, how, &cinfo); | |
1950 | if (ret < 0) | |
1951 | break; | |
1952 | ret = 0; | |
1953 | if (wbc && wbc->sync_mode == WB_SYNC_NONE) { | |
1954 | if (nscan < wbc->nr_to_write) | |
1955 | wbc->nr_to_write -= nscan; | |
1956 | else | |
1957 | wbc->nr_to_write = 0; | |
1958 | } | |
1959 | if (nscan < INT_MAX) | |
1960 | break; | |
1961 | cond_resched(); | |
1962 | } | |
af7cf057 | 1963 | nfs_commit_end(cinfo.mds); |
c4f24df9 TM |
1964 | if (ret || !may_wait) |
1965 | return ret; | |
1966 | return wait_on_commit(cinfo.mds); | |
1967 | } | |
1968 | ||
1969 | int nfs_commit_inode(struct inode *inode, int how) | |
1970 | { | |
1971 | return __nfs_commit_inode(inode, how, NULL); | |
1da177e4 | 1972 | } |
b20135d0 | 1973 | EXPORT_SYMBOL_GPL(nfs_commit_inode); |
8fc795f7 | 1974 | |
ae09c31f | 1975 | int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
8fc795f7 | 1976 | { |
420e3646 TM |
1977 | struct nfs_inode *nfsi = NFS_I(inode); |
1978 | int flags = FLUSH_SYNC; | |
1979 | int ret = 0; | |
8fc795f7 | 1980 | |
a00dd6c0 | 1981 | if (wbc->sync_mode == WB_SYNC_NONE) { |
c4f24df9 TM |
1982 | /* no commits means nothing needs to be done */ |
1983 | if (!atomic_long_read(&nfsi->commit_info.ncommit)) | |
1984 | goto check_requests_outstanding; | |
1985 | ||
a00dd6c0 JL |
1986 | /* Don't commit yet if this is a non-blocking flush and there |
1987 | * are a lot of outstanding writes for this mapping. | |
1988 | */ | |
1a4edf0f | 1989 | if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) |
a00dd6c0 | 1990 | goto out_mark_dirty; |
420e3646 | 1991 | |
a00dd6c0 | 1992 | /* don't wait for the COMMIT response */ |
420e3646 | 1993 | flags = 0; |
a00dd6c0 JL |
1994 | } |
1995 | ||
c4f24df9 TM |
1996 | ret = __nfs_commit_inode(inode, flags, wbc); |
1997 | if (!ret) { | |
1998 | if (flags & FLUSH_SYNC) | |
1999 | return 0; | |
2000 | } else if (atomic_long_read(&nfsi->commit_info.ncommit)) | |
2001 | goto out_mark_dirty; | |
2002 | ||
2003 | check_requests_outstanding: | |
2004 | if (!atomic_read(&nfsi->commit_info.rpcs_out)) | |
2005 | return ret; | |
420e3646 | 2006 | out_mark_dirty: |
8fc795f7 TM |
2007 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
2008 | return ret; | |
2009 | } | |
89d77c8f | 2010 | EXPORT_SYMBOL_GPL(nfs_write_inode); |
a8d8f02c | 2011 | |
837bb1d7 TM |
2012 | /* |
2013 | * Wrapper for filemap_write_and_wait_range() | |
2014 | * | |
2015 | * Needed for pNFS in order to ensure data becomes visible to the | |
2016 | * client. | |
2017 | */ | |
2018 | int nfs_filemap_write_and_wait_range(struct address_space *mapping, | |
2019 | loff_t lstart, loff_t lend) | |
2020 | { | |
2021 | int ret; | |
2022 | ||
2023 | ret = filemap_write_and_wait_range(mapping, lstart, lend); | |
2024 | if (ret == 0) | |
2025 | ret = pnfs_sync_inode(mapping->host, true); | |
2026 | return ret; | |
2027 | } | |
2028 | EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range); | |
2029 | ||
acdc53b2 TM |
2030 | /* |
2031 | * flush the inode to disk. | |
2032 | */ | |
2033 | int nfs_wb_all(struct inode *inode) | |
34901f70 | 2034 | { |
f4ce1299 TM |
2035 | int ret; |
2036 | ||
2037 | trace_nfs_writeback_inode_enter(inode); | |
2038 | ||
5bb89b47 | 2039 | ret = filemap_write_and_wait(inode->i_mapping); |
6b196875 CL |
2040 | if (ret) |
2041 | goto out; | |
2042 | ret = nfs_commit_inode(inode, FLUSH_SYNC); | |
2043 | if (ret < 0) | |
2044 | goto out; | |
2045 | pnfs_sync_inode(inode, true); | |
2046 | ret = 0; | |
34901f70 | 2047 | |
6b196875 | 2048 | out: |
f4ce1299 TM |
2049 | trace_nfs_writeback_inode_exit(inode, ret); |
2050 | return ret; | |
1c75950b | 2051 | } |
ddda8e0a | 2052 | EXPORT_SYMBOL_GPL(nfs_wb_all); |
1c75950b | 2053 | |
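/*
 * Cancel all requests attached to @folio without writing them back,
 * e.g. when the folio is being invalidated.
 */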
6d740c76 | 2054 | int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio) |
1b3b4a1a TM |
2055 | { |
2056 | struct nfs_page *req; | |
1b3b4a1a TM |
2057 | int ret = 0; |
2058 | ||
6d740c76 | 2059 | folio_wait_writeback(folio); |
3e217045 WAA |
2060 | |
2061 | /* blocking call to cancel all requests and join to a single (head) | |
2062 | * request */ | |
0c493b5c | 2063 | req = nfs_lock_and_join_requests(folio); |
3e217045 WAA |
2064 | |
2065 | if (IS_ERR(req)) { | |
2066 | ret = PTR_ERR(req); | |
2067 | } else if (req) { | |
6d740c76 | 2068 | /* all requests from this folio have been cancelled by |
3e217045 WAA |
2069 | * nfs_lock_and_join_requests, so just remove the head |
2070 | * request from the inode / page_private pointer and | |
2071 | * release it */ | |
2072 | nfs_inode_remove_request(req); | |
3e217045 | 2073 | nfs_unlock_and_release_request(req); |
1b3b4a1a | 2074 | } |
3e217045 | 2075 | |
1b3b4a1a TM |
2076 | return ret; |
2077 | } | |
2078 | ||
5241060e TM |
2079 | /** |
2080 | * nfs_wb_folio - Write back all requests on one folio |
2081 | * @inode: pointer to inode |
2082 | * @folio: pointer to folio | |
2083 | * | |
2084 | * Assumes that the folio has been locked by the caller, and will | |
2085 | * not unlock it. | |
7f2f12d9 | 2086 | */ |
5241060e | 2087 | int nfs_wb_folio(struct inode *inode, struct folio *folio) |
1c75950b | 2088 | { |
5241060e TM |
2089 | loff_t range_start = folio_file_pos(folio); |
2090 | loff_t range_end = range_start + (loff_t)folio_size(folio) - 1; | |
4d770ccf | 2091 | struct writeback_control wbc = { |
4d770ccf | 2092 | .sync_mode = WB_SYNC_ALL, |
7f2f12d9 | 2093 | .nr_to_write = 0, |
4d770ccf TM |
2094 | .range_start = range_start, |
2095 | .range_end = range_end, | |
2096 | }; | |
2097 | int ret; | |
1c75950b | 2098 | |
256093fe | 2099 | trace_nfs_writeback_folio(inode, folio); |
f4ce1299 | 2100 | |
0522f6ad | 2101 | for (;;) { |
5241060e TM |
2102 | folio_wait_writeback(folio); |
2103 | if (folio_clear_dirty_for_io(folio)) { | |
0c493b5c | 2104 | ret = nfs_writepage_locked(folio, &wbc); |
73e3302f TM |
2105 | if (ret < 0) |
2106 | goto out_error; | |
0522f6ad | 2107 | continue; |
7f2f12d9 | 2108 | } |
f4ce1299 | 2109 | ret = 0; |
5241060e | 2110 | if (!folio_test_private(folio)) |
0522f6ad TM |
2111 | break; |
2112 | ret = nfs_commit_inode(inode, FLUSH_SYNC); | |
ba8b06e6 | 2113 | if (ret < 0) |
73e3302f | 2114 | goto out_error; |
7f2f12d9 | 2115 | } |
73e3302f | 2116 | out_error: |
256093fe | 2117 | trace_nfs_writeback_folio_done(inode, folio, ret); |
4d770ccf | 2118 | return ret; |
1c75950b TM |
2119 | } |
2120 | ||
074cc1de | 2121 | #ifdef CONFIG_MIGRATION |
4ae84a80 MWO |
2122 | int nfs_migrate_folio(struct address_space *mapping, struct folio *dst, |
2123 | struct folio *src, enum migrate_mode mode) | |
074cc1de | 2124 | { |
2da95652 | 2125 | /* |
4ae84a80 | 2126 | * If the private flag is set, the folio is currently associated with |
2da95652 JL |
2127 | * an in-progress read or write request. Don't try to migrate it. |
2128 | * | |
2129 | * FIXME: we could do this in principle, but we'll need a way to ensure | |
2130 | * that we can safely release the inode reference while holding | |
4ae84a80 | 2131 | * the folio lock. |
2da95652 | 2132 | */ |
4ae84a80 | 2133 | if (folio_test_private(src)) |
2da95652 | 2134 | return -EBUSY; |
074cc1de | 2135 | |
4ae84a80 | 2136 | if (folio_test_fscache(src)) { |
16f2f4e6 DH |
2137 | if (mode == MIGRATE_ASYNC) |
2138 | return -EBUSY; | |
4ae84a80 | 2139 | folio_wait_fscache(src); |
16f2f4e6 | 2140 | } |
074cc1de | 2141 | |
54184650 | 2142 | return migrate_folio(mapping, dst, src, mode); |
074cc1de TM |
2143 | } |
2144 | #endif | |
2145 | ||
f7b422b1 | 2146 | int __init nfs_init_writepagecache(void) |
1da177e4 LT |
2147 | { |
2148 | nfs_wdata_cachep = kmem_cache_create("nfs_write_data", | |
1e7f3a48 | 2149 | sizeof(struct nfs_pgio_header), |
1da177e4 | 2150 | 0, SLAB_HWCACHE_ALIGN, |
20c2df83 | 2151 | NULL); |
1da177e4 LT |
2152 | if (nfs_wdata_cachep == NULL) |
2153 | return -ENOMEM; | |
2154 | ||
93d2341c MD |
2155 | nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, |
2156 | nfs_wdata_cachep); | |
1da177e4 | 2157 | if (nfs_wdata_mempool == NULL) |
3dd4765f | 2158 | goto out_destroy_write_cache; |
1da177e4 | 2159 | |
0b7c0153 FI |
2160 | nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", |
2161 | sizeof(struct nfs_commit_data), | |
2162 | 0, SLAB_HWCACHE_ALIGN, | |
2163 | NULL); | |
2164 | if (nfs_cdata_cachep == NULL) | |
3dd4765f | 2165 | goto out_destroy_write_mempool; |
0b7c0153 | 2166 | |
93d2341c | 2167 | nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, |
4c100210 | 2168 | nfs_cdata_cachep); |
1da177e4 | 2169 | if (nfs_commit_mempool == NULL) |
3dd4765f | 2170 | goto out_destroy_commit_cache; |
1da177e4 | 2171 | |
89a09141 PZ |
2172 | /* |
2173 | * NFS congestion size, scale with available memory. | |
2174 | * | |
2175 | * 64MB: 8192k | |
2176 | * 128MB: 11585k | |
2177 | * 256MB: 16384k | |
2178 | * 512MB: 23170k | |
2179 | * 1GB: 32768k | |
2180 | * 2GB: 46340k | |
2181 | * 4GB: 65536k | |
2182 | * 8GB: 92681k | |
2183 | * 16GB: 131072k | |
2184 | * | |
2185 | * This allows larger machines to have larger/more transfers. | |
2186 | * Limit the default to 256M | |
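 * (With 4K pages this works out to 16 * int_sqrt(pages) << 2, e.g.
 * 1GB = 262144 pages: 16 * 512 << 2 = 32768k, matching the table.)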
2187 | */ | |
ca79b0c2 | 2188 | nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); |
89a09141 PZ |
2189 | if (nfs_congestion_kb > 256*1024) |
2190 | nfs_congestion_kb = 256*1024; | |
2191 | ||
1da177e4 | 2192 | return 0; |
3dd4765f JL |
2193 | |
2194 | out_destroy_commit_cache: | |
2195 | kmem_cache_destroy(nfs_cdata_cachep); | |
2196 | out_destroy_write_mempool: | |
2197 | mempool_destroy(nfs_wdata_mempool); | |
2198 | out_destroy_write_cache: | |
2199 | kmem_cache_destroy(nfs_wdata_cachep); | |
2200 | return -ENOMEM; | |
1da177e4 LT |
2201 | } |
2202 | ||
266bee88 | 2203 | void nfs_destroy_writepagecache(void) |
1da177e4 LT |
2204 | { |
2205 | mempool_destroy(nfs_commit_mempool); | |
3dd4765f | 2206 | kmem_cache_destroy(nfs_cdata_cachep); |
1da177e4 | 2207 | mempool_destroy(nfs_wdata_mempool); |
1a1d92c1 | 2208 | kmem_cache_destroy(nfs_wdata_cachep); |
1da177e4 LT |
2209 | } |
2210 | ||
4a0de55c AS |
2211 | static const struct nfs_rw_ops nfs_rw_write_ops = { |
2212 | .rw_alloc_header = nfs_writehdr_alloc, | |
2213 | .rw_free_header = nfs_writehdr_free, | |
0eecb214 AS |
2214 | .rw_done = nfs_writeback_done, |
2215 | .rw_result = nfs_writeback_result, | |
1ed26f33 | 2216 | .rw_initiate = nfs_initiate_write, |
4a0de55c | 2217 | }; |