// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>
#include <linux/filelock.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
				     struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;
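/*
 * Allocate commit data from the slab cache, falling back to the
 * dedicated mempool so that commits can still make progress under
 * memory pressure.
 */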
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p;

	p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask());
	if (!p) {
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			return NULL;
		memset(p, 0, sizeof(*p));
	}
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);
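/*
 * Allocate a write header, using the same cache-then-mempool fallback
 * strategy as nfs_commitdata_alloc() above.
 */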
static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p;

	p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask());
	if (!p) {
		p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT);
		if (!p)
			return NULL;
		memset(p, 0, sizeof(*p));
	}
	p->rw_mode = FMODE_WRITE;
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}
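/*
 * An nfs_io_completion is a refcounted callback shared by all writes
 * issued during a single writeback pass; the callback runs once the
 * final reference is dropped.
 */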
static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
	return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
		void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	kref_init(&ioc->refcount);
}

static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc = container_of(kref,
			struct nfs_io_completion, refcount);
	ioc->complete(ioc->data);
	kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_put(&ioc->refcount, nfs_io_completion_release);
}

static void
nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
{
	if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
		kref_get(&req->wb_kref);
		atomic_long_inc(&NFS_I(inode)->nrequests);
	}
}

static int
nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
	int ret;

	if (!test_bit(PG_REMOVE, &req->wb_flags))
		return 0;
	ret = nfs_page_group_lock(req);
	if (ret)
		return ret;
	if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
		nfs_page_set_inode_ref(req, inode);
	nfs_page_group_unlock(req);
	return 0;
}
/**
 * nfs_folio_find_head_request - find head request associated with a folio
 * @folio: pointer to folio
 *
 * Returns the matching head request with a reference held, or NULL
 * if not found.
 */
static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct nfs_page *req;

	if (!folio_test_private(folio))
		return NULL;
	spin_lock(&mapping->i_private_lock);
	req = folio->private;
	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}
	spin_unlock(&mapping->i_private_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct folio *folio, unsigned int offset,
			  unsigned int count)
{
	struct inode *inode = folio->mapping->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio);
	if (i_size > 0 && folio->index < end_index)
		goto out;
	end = folio_pos(folio) + (loff_t)offset + (loff_t)count;
	if (i_size >= end)
		goto out;
	trace_nfs_size_grow(inode, end);
	i_size_write(inode, end);
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	/* Atomically update timestamps if they are delegated to us. */
	nfs_update_delegated_mtime_locked(inode);
	spin_unlock(&inode->i_lock);
	nfs_fscache_invalidate(inode, 0);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	nfs_zap_mapping(mapping->host, mapping);
	/* Force file size revalidation */
	spin_lock(&inode->i_lock);
	nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
				     NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_SIZE);
	spin_unlock(&inode->i_lock);
}

static void nfs_mapping_set_error(struct folio *folio, int error)
{
	struct address_space *mapping = folio->mapping;

	filemap_set_wb_err(mapping, error);
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err,
			   error == -ENOSPC ? -ENOSPC : -EIO);
	nfs_set_pageerror(mapping);
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	unsigned int len = nfs_folio_length(nfs_page_to_folio(req));
	struct nfs_page *tmp;
	unsigned int pos = 0;

	nfs_page_group_lock(req);

	for (;;) {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (!tmp)
			break;
		pos = tmp->wb_pgbase + tmp->wb_bytes;
	}

	nfs_page_group_unlock(req);
	return pos >= len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (folio_test_uptodate(folio))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	folio_mark_uptodate(folio);
}

static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

static void nfs_folio_set_writeback(struct folio *folio)
{
	struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);

	folio_start_writeback(folio);
	if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH)
		nfss->write_congested = 1;
}

static void nfs_folio_end_writeback(struct folio *folio)
{
	struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);

	folio_end_writeback(folio);
	if (atomic_long_dec_return(&nfss->writeback) <
	    NFS_CONGESTION_OFF_THRESH) {
		nfss->write_congested = 0;
		wake_up_all(&nfss->write_congestion_wait);
	}
}
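/*
 * End writeback on the folio only once the last request in the page
 * group has passed through PG_WB_END; the request itself is always
 * unlocked.
 */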
static void nfs_page_end_writeback(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_WB_END)) {
		nfs_unlock_request(req);
		nfs_folio_end_writeback(nfs_page_to_folio(req));
	} else
		nfs_unlock_request(req);
}
/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		/* Note: lock subreq in order to change subreq->wb_head */
		nfs_page_set_headlock(subreq);
		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_this_page = subreq;
		subreq->wb_head = subreq;

		clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
		if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
				nfs_page_clear_headlock(subreq);
				nfs_free_request(subreq);
			} else
				nfs_page_clear_headlock(subreq);
			continue;
		}
		nfs_page_clear_headlock(subreq);

		nfs_release_request(old_head);

		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
			nfs_release_request(subreq);
			atomic_long_dec(&NFS_I(inode)->nrequests);
		}

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_and_release_request(subreq);
	}
}

/*
 * nfs_join_page_group - destroy subrequests of the head req
 * @head: the head request of the page group
 * @cinfo: commit info for the inode
 * @inode: Inode to which the request belongs.
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group. All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 */
void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
			 struct inode *inode)
{
	struct nfs_page *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int pgbase, off, bytes;

	pgbase = head->wb_pgbase;
	bytes = head->wb_bytes;
	off = head->wb_offset;
	for (subreq = head->wb_this_page; subreq != head;
			subreq = subreq->wb_this_page) {
		/* Subrequests should always form a contiguous range */
		if (pgbase > subreq->wb_pgbase) {
			off -= pgbase - subreq->wb_pgbase;
			bytes += pgbase - subreq->wb_pgbase;
			pgbase = subreq->wb_pgbase;
		}
		bytes = max(subreq->wb_pgbase + subreq->wb_bytes
				- pgbase, bytes);
	}

	/* Set the head request's range to cover the former page group */
	head->wb_pgbase = pgbase;
	head->wb_bytes = bytes;
	head->wb_offset = off;

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(cinfo, subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;
	}

	nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
}
/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Waits uninterruptibly until the PG_BUSY flag is cleared.
 * The user is responsible for holding a count on the request.
 */
static int nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
 * @head: head request of page group, must be holding head lock
 * @req: request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests.
 */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}
/*
 * nfs_page_group_lock_subreq - try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
	int ret;

	if (!kref_get_unless_zero(&subreq->wb_kref))
		return 0;
	while (!nfs_lock_request(subreq)) {
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			ret = nfs_page_group_lock(head);
		if (ret < 0) {
			nfs_unroll_locks(head, subreq);
			nfs_release_request(subreq);
			return ret;
		}
	}
	return 0;
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req
 * @folio: the folio used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group. All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @folio, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_page *head, *subreq;
	struct nfs_commit_info cinfo;
	int ret;

	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
retry:
	head = nfs_folio_find_head_request(folio);
	if (!head)
		return NULL;

	while (!nfs_lock_request(head)) {
		ret = nfs_wait_on_request(head);
		if (ret < 0) {
			nfs_release_request(head);
			return ERR_PTR(ret);
		}
	}

	/* Ensure that nobody removed the request before we locked it */
	if (head != folio->private) {
		nfs_unlock_and_release_request(head);
		goto retry;
	}

	ret = nfs_cancel_remove_inode(head, inode);
	if (ret < 0)
		goto out_unlock;

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		goto out_unlock;

	/* lock each request in the page group */
	for (subreq = head->wb_this_page;
	     subreq != head;
	     subreq = subreq->wb_this_page) {
		ret = nfs_page_group_lock_subreq(head, subreq);
		if (ret < 0)
			goto out_unlock;
	}

	nfs_page_group_unlock(head);

	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_join_page_group(head, &cinfo, inode);
	return head;

out_unlock:
	nfs_unlock_and_release_request(head);
	return ERR_PTR(ret);
}
static void nfs_write_error(struct nfs_page *req, int error)
{
	trace_nfs_write_error(nfs_page_to_inode(req), req, error);
	nfs_mapping_set_error(nfs_page_to_folio(req), error);
	nfs_inode_remove_request(req);
	nfs_page_end_writeback(req);
	nfs_release_request(req);
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error from nfs_lock_and_join_requests(), or if the
 * request cannot be added to the I/O descriptor.
 */
static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{
	struct nfs_page *req;
	int ret;

	nfs_pageio_cond_complete(pgio, folio->index);

	req = nfs_lock_and_join_requests(folio);
	if (!req)
		return 0;
	if (IS_ERR(req))
		return PTR_ERR(req);

	nfs_folio_set_writeback(folio);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	/* If there is a fatal error that covers this write, just exit */
	ret = pgio->pg_error;
	if (nfs_error_is_fatal_on_server(ret))
		goto out_launder;

	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
		if (nfs_error_is_fatal_on_server(ret))
			goto out_launder;
		folio_redirty_for_writepage(wbc, folio);
		nfs_redirty_request(req);
		pgio->pg_error = 0;
		return ret;
	}

	nfs_add_stats(folio->mapping->host, NFSIOS_WRITEPAGES, 1);
	return 0;

out_launder:
	nfs_write_error(req, ret);
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct folio *folio,
				struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = folio->mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0, false,
			      &nfs_async_write_completion_ops);
	err = nfs_do_writepage(folio, wbc, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	return err;
}

static void nfs_io_completion_commit(void *inode)
{
	nfs_commit_inode(inode, 0);
}
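/*
 * Flush the dirty pages of @mapping to the server. For most writeback
 * passes the writes share a single nfs_io_completion, so that a COMMIT
 * is issued once the last write of the pass completes.
 */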
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	struct nfs_io_completion *ioc = NULL;
	unsigned int mntflags = NFS_SERVER(inode)->flags;
	struct nfs_server *nfss = NFS_SERVER(inode);
	int priority = 0;
	int err;

	/* Wait with writeback until write congestion eases */
	if (wbc->sync_mode == WB_SYNC_NONE && nfss->write_congested) {
		err = wait_event_killable(nfss->write_congestion_wait,
					  nfss->write_congested == 0);
		if (err)
			return err;
	}

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate ||
	    wbc->for_background || wbc->for_sync || wbc->for_reclaim) {
		ioc = nfs_io_completion_alloc(GFP_KERNEL);
		if (ioc)
			nfs_io_completion_init(ioc, nfs_io_completion_commit,
					       inode);
		priority = wb_priority(wbc);
	}

	do {
		struct folio *folio = NULL;

		nfs_pageio_init_write(&pgio, inode, priority, false,
				      &nfs_async_write_completion_ops);
		pgio.pg_io_completion = ioc;
		while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
			err = nfs_do_writepage(folio, wbc, &pgio);
			folio_unlock(folio);
		}
		pgio.pg_error = 0;
		nfs_pageio_complete(&pgio);
		if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR)
			break;
	} while (err < 0 && !nfs_error_is_fatal(err));
	nfs_io_completion_put(ioc);

	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	struct address_space *mapping = folio->mapping;
	struct nfs_inode *nfsi = NFS_I(mapping->host);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);
	spin_lock(&mapping->i_private_lock);
	set_bit(PG_MAPPED, &req->wb_flags);
	folio_set_private(folio);
	folio->private = req;
	spin_unlock(&mapping->i_private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		struct folio *folio = nfs_page_to_folio(req->wb_head);
		struct address_space *mapping = folio->mapping;

		spin_lock(&mapping->i_private_lock);
		if (likely(folio)) {
			folio->private = NULL;
			folio_clear_private(folio);
			clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
		}
		spin_unlock(&mapping->i_private_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
		atomic_long_dec(&nfsi->nrequests);
		nfs_release_request(req);
	}
}
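/* Mark the folio backing @req dirty, if there is one */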
static void nfs_mark_request_dirty(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	if (folio)
		filemap_dirty_folio(folio_mapping(folio), folio);
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

static void nfs_folio_clear_commit(struct folio *folio)
{
	if (folio) {
		long nr = folio_nr_pages(folio);

		node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
		wb_stat_mod(&inode_to_bdi(folio->mapping->host)->wb,
			    WB_WRITEBACK, -nr);
	}
}

/* Called holding the request lock on @req */
static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct nfs_open_context *ctx = nfs_req_openctx(req);
		struct inode *inode = d_inode(ctx->dentry);

		mutex_lock(&NFS_I(inode)->commit_mutex);
		if (!pnfs_clear_request_commit(req, cinfo)) {
			nfs_request_remove_commit_list(req, cinfo);
		}
		mutex_unlock(&NFS_I(inode)->commit_mutex);
		nfs_folio_clear_commit(nfs_page_to_folio(req));
	}
}
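/*
 * Writes committed to stable storage (NFS_FILE_SYNC) never need a
 * COMMIT; NFS_DATA_SYNC writes need one only when sent through the
 * MDS (no layout segment).
 */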
int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
	nfs_io_completion_get(hdr->io_completion);
}

static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			trace_nfs_comp_error(hdr->inode, req, hdr->error);
			nfs_mapping_set_error(nfs_page_to_folio(req),
					      hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			/* Reset wb_nio, since the write was successful. */
			req->wb_nio = 0;
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_page_end_writeback(req);
		nfs_release_request(req);
	}
out:
	nfs_io_completion_put(hdr->io_completion);
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		kref_get(&req->wb_kref);
		if (!nfs_lock_request(req)) {
			nfs_release_request(req);
			continue;
		}
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
						  unsigned int offset,
						  unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(folio);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	req->wb_nio = 0;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_folio(folio->mapping->host, folio);
	return (error < 0) ? ERR_PTR(error) : NULL;
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
						struct folio *folio,
						unsigned int offset,
						unsigned int bytes)
{
	struct nfs_page *req;

	req = nfs_try_to_update_request(folio, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_page_create_from_folio(ctx, folio, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx,
			       struct folio *folio, unsigned int offset,
			       unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, folio, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(folio, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct folio *folio)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = locks_inode_context(file_inode(file));
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_folio_find_head_request(folio);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = nfs_page_to_folio(req) != folio ||
			   !nfs_match_open_context(nfs_req_openctx(req), ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_folio(folio->mapping->host, folio);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Returns 0 and sets a credential flag, which triggers the inode to flush
 * and perform NFS_FILE_SYNC writes, if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);

	if (nfs_ctx_key_to_expire(ctx, inode) &&
	    !rcu_access_pointer(ctx->ll_cred))
		/* Already expired! */
		return -EACCES;
	return 0;
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
	struct rpc_cred *cred, *new, *old = NULL;
	struct auth_cred acred = {
		.cred = ctx->cred,
	};
	bool ret = false;

	rcu_read_lock();
	cred = rcu_dereference(ctx->ll_cred);
	if (cred && !(cred->cr_ops->crkey_timeout &&
		      cred->cr_ops->crkey_timeout(cred)))
		goto out;
	rcu_read_unlock();

	new = auth->au_ops->lookup_cred(auth, &acred, 0);
	if (new == cred) {
		put_rpccred(new);
		return true;
	}
	if (IS_ERR_OR_NULL(new)) {
		new = NULL;
		ret = true;
	} else if (new->cr_ops->crkey_timeout &&
		   new->cr_ops->crkey_timeout(new))
		ret = true;

	rcu_read_lock();
	old = rcu_dereference_protected(xchg(&ctx->ll_cred,
					     RCU_INITIALIZER(new)), 1);
out:
	rcu_read_unlock();
	put_rpccred(old);
	return ret;
}
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity &
	    (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE))
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0)
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
		return false;
	return folio_test_uptodate(folio) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			lock_is_write(fl);
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct folio *folio,
				unsigned int pagelen)
{
	struct inode *inode = file_inode(file);
	struct file_lock_context *flctx = locks_inode_context(inode);
	struct file_lock *fl;
	int ret;
	unsigned int mntflags = NFS_SERVER(inode)->flags;

	if (mntflags & NFS_MOUNT_NO_ALIGNWRITE)
		return 0;
	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_folio_write_uptodate(folio, pagelen))
		return 0;
	if (nfs_have_write_delegation(inode))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
				      c.flc_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
				      c.flc_list);
		if (lock_is_write(fl))
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_update_folio(struct file *file, struct folio *folio,
		     unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	unsigned int pagelen = nfs_folio_length(folio);
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:       nfs_update_folio(%pD2 %d@%lld)\n", file, count,
		(long long)(folio_pos(folio) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, folio, pagelen)) {
		unsigned int end = count + offset;

		offset = round_down(offset, PAGE_SIZE);
		if (end < pagelen)
			end = min(round_up(end, PAGE_SIZE), pagelen);
		count = end - offset;
	}

	status = nfs_writepage_setup(ctx, folio, offset, count);
	if (status < 0)
		nfs_set_pageerror(mapping);
out:
	dprintk("NFS:       nfs_update_folio returns %d (isize %lld)\n",
		status, (long long)i_size_read(inode));
	return status;
}
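/* Map the FLUSH_* flags in @how onto an RPC task queue priority */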
3ff7576d | 1368 | static int flush_task_priority(int how) |
1da177e4 LT |
1369 | { |
1370 | switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { | |
1371 | case FLUSH_HIGHPRI: | |
1372 | return RPC_PRIORITY_HIGH; | |
1373 | case FLUSH_LOWPRI: | |
1374 | return RPC_PRIORITY_LOW; | |
1375 | } | |
1376 | return RPC_PRIORITY_NORMAL; | |
1377 | } | |
1378 | ||
d45f60c6 WAA |
1379 | static void nfs_initiate_write(struct nfs_pgio_header *hdr, |
1380 | struct rpc_message *msg, | |
abde71f4 | 1381 | const struct nfs_rpc_ops *rpc_ops, |
1ed26f33 | 1382 | struct rpc_task_setup *task_setup_data, int how) |
1da177e4 | 1383 | { |
3ff7576d | 1384 | int priority = flush_task_priority(how); |
d138d5d1 | 1385 | |
8db55a03 N |
1386 | if (IS_SWAPFILE(hdr->inode)) |
1387 | task_setup_data->flags |= RPC_TASK_SWAPPER; | |
1ed26f33 | 1388 | task_setup_data->priority = priority; |
fb91fb0e | 1389 | rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); |
5bb2a7cb | 1390 | trace_nfs_initiate_write(hdr); |
275acaaf TM |
1391 | } |
1392 | ||
6d884e8f F |
1393 | /* If an nfs_flush_* function fails, it should remove the requests from @head
1394 | * and call this on each one, which prepares them to be retried on the next
1395 | * writeback using standard NFS.
1396 | */ | |
1397 | static void nfs_redirty_request(struct nfs_page *req) | |
1398 | { | |
6dd85e83 | 1399 | struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req)); |
67f4b5dc | 1400 | |
33344e0f TM |
1401 | /* Bump the transmission count */ |
1402 | req->wb_nio++; | |
6d884e8f | 1403 | nfs_mark_request_dirty(req); |
67f4b5dc | 1404 | atomic_long_inc(&nfsi->redirtied_pages); |
0c493b5c | 1405 | nfs_page_end_writeback(req); |
3aff4ebb | 1406 | nfs_release_request(req); |
6d884e8f F |
1407 | } |
1408 | ||
df3accb8 | 1409 | static void nfs_async_write_error(struct list_head *head, int error) |
6c75dc0d FI |
1410 | { |
1411 | struct nfs_page *req; | |
1412 | ||
1413 | while (!list_empty(head)) { | |
1414 | req = nfs_list_entry(head->next); | |
1415 | nfs_list_remove_request(req); | |
cea9ba72 | 1416 | if (nfs_error_is_fatal_on_server(error)) |
6fbda89b TM |
1417 | nfs_write_error(req, error); |
1418 | else | |
1419 | nfs_redirty_request(req); | |
6c75dc0d FI |
1420 | } |
1421 | } | |
1422 | ||
dc602dd7 TM |
1423 | static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) |
1424 | { | |
df3accb8 | 1425 | nfs_async_write_error(&hdr->pages, 0); |
dc602dd7 TM |
1426 | } |
1427 | ||
061ae2ed | 1428 | static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { |
919e3bd9 | 1429 | .init_hdr = nfs_async_write_init, |
061ae2ed FI |
1430 | .error_cleanup = nfs_async_write_error, |
1431 | .completion = nfs_write_completion, | |
dc602dd7 | 1432 | .reschedule_io = nfs_async_write_reschedule_io, |
061ae2ed FI |
1433 | }; |
1434 | ||
57208fa7 | 1435 | void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, |
a20c93e3 | 1436 | struct inode *inode, int ioflags, bool force_mds, |
061ae2ed | 1437 | const struct nfs_pgio_completion_ops *compl_ops) |
1da177e4 | 1438 | { |
a20c93e3 | 1439 | struct nfs_server *server = NFS_SERVER(inode); |
41d8d5b7 | 1440 | const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops; |
a20c93e3 CH |
1441 | |
1442 | #ifdef CONFIG_NFS_V4_1 | |
1443 | if (server->pnfs_curr_ld && !force_mds) | |
1444 | pg_ops = server->pnfs_curr_ld->pg_write_ops; | |
1445 | #endif | |
4a0de55c | 1446 | nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops, |
3bde7afd | 1447 | server->wsize, ioflags); |
1751c363 | 1448 | } |
ddda8e0a | 1449 | EXPORT_SYMBOL_GPL(nfs_pageio_init_write); |
1da177e4 | 1450 | |
dce81290 TM |
1451 | void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) |
1452 | { | |
a7d42ddb WAA |
1453 | struct nfs_pgio_mirror *mirror; |
1454 | ||
6f29b9bb KM |
1455 | if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) |
1456 | pgio->pg_ops->pg_cleanup(pgio); | |
1457 | ||
41d8d5b7 | 1458 | pgio->pg_ops = &nfs_pgio_rw_ops; |
a7d42ddb WAA |
1459 | |
1460 | nfs_pageio_stop_mirroring(pgio); | |
1461 | ||
1462 | mirror = &pgio->pg_mirrors[0]; | |
1463 | mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; | |
dce81290 | 1464 | } |
1f945357 | 1465 | EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); |
dce81290 | 1466 | |
1da177e4 | 1467 | |
0b7c0153 FI |
1468 | void nfs_commit_prepare(struct rpc_task *task, void *calldata) |
1469 | { | |
1470 | struct nfs_commit_data *data = calldata; | |
1471 | ||
1472 | NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); | |
1473 | } | |
1474 | ||
a08a8cd3 TM |
1475 | static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, |
1476 | struct nfs_fattr *fattr) | |
1477 | { | |
1478 | struct nfs_pgio_args *argp = &hdr->args; | |
1479 | struct nfs_pgio_res *resp = &hdr->res; | |
2b83d3de | 1480 | u64 size = argp->offset + resp->count; |
a08a8cd3 TM |
1481 | |
1482 | if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) | |
2b83d3de TM |
1483 | fattr->size = size; |
1484 | if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { | |
1485 | fattr->valid &= ~NFS_ATTR_FATTR_SIZE; | |
a08a8cd3 | 1486 | return; |
2b83d3de TM |
1487 | } |
1488 | if (size != fattr->size) | |
a08a8cd3 TM |
1489 | return; |
1490 | /* Set attribute barrier */ | |
1491 | nfs_fattr_set_barrier(fattr); | |
2b83d3de TM |
1492 | /* ...and update size */ |
1493 | fattr->valid |= NFS_ATTR_FATTR_SIZE; | |
a08a8cd3 TM |
1494 | } |
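/*
 * Illustrative example: if the cached i_size is 8192 and a WRITE of
 * 4096 bytes at offset 8192 succeeds, size == 12288; a post-op size
 * of 12288 matches the end of this write, so the attribute is trusted
 * and a barrier is set, whereas a post-op size smaller than the local
 * i_size is discarded as stale.
 */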
1495 | ||
1496 | void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) | |
1497 | { | |
2b83d3de | 1498 | struct nfs_fattr *fattr = &hdr->fattr; |
a08a8cd3 TM |
1499 | struct inode *inode = hdr->inode; |
1500 | ||
e12912d9 TM |
1501 | if (nfs_have_delegated_mtime(inode)) { |
1502 | spin_lock(&inode->i_lock); | |
1503 | nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS); | |
1504 | spin_unlock(&inode->i_lock); | |
1505 | return; | |
1506 | } | |
1507 | ||
a08a8cd3 TM |
1508 | spin_lock(&inode->i_lock); |
1509 | nfs_writeback_check_extend(hdr, fattr); | |
1510 | nfs_post_op_update_inode_force_wcc_locked(inode, fattr); | |
1511 | spin_unlock(&inode->i_lock); | |
1512 | } | |
1513 | EXPORT_SYMBOL_GPL(nfs_writeback_update_inode); | |
1514 | ||
1da177e4 LT |
1515 | /* |
1516 | * This function is called when the WRITE call is complete. | |
1517 | */ | |
d45f60c6 WAA |
1518 | static int nfs_writeback_done(struct rpc_task *task, |
1519 | struct nfs_pgio_header *hdr, | |
0eecb214 | 1520 | struct inode *inode) |
1da177e4 | 1521 | { |
788e7a89 | 1522 | int status; |
1da177e4 | 1523 | |
f551e44f CL |
1524 | /* |
1525 | * ->write_done will attempt to use post-op attributes to detect | |
1526 | * conflicting writes by other clients. A strict interpretation | |
1527 | * of close-to-open would allow us to continue caching even if | |
1528 | * another writer had changed the file, but some applications | |
1529 | * depend on tighter cache coherency when writing. | |
1530 | */ | |
d45f60c6 | 1531 | status = NFS_PROTO(inode)->write_done(task, hdr); |
788e7a89 | 1532 | if (status != 0) |
0eecb214 | 1533 | return status; |
8224b273 | 1534 | |
d45f60c6 | 1535 | nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); |
5bb2a7cb | 1536 | trace_nfs_writeback_done(task, hdr); |
91d5b470 | 1537 | |
69d96651 JL |
1538 | if (task->tk_status >= 0) { |
1539 | enum nfs3_stable_how committed = hdr->res.verf->committed; | |
1540 | ||
1541 | if (committed == NFS_UNSTABLE) { | |
1542 | /* | |
1543 | * We have some uncommitted data on the server at | |
1544 | * this point, so ensure that we keep track of that | |
1545 | * fact irrespective of what later writes do. | |
1546 | */ | |
1547 | set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags); | |
1548 | } | |
1da177e4 | 1549 | |
69d96651 JL |
1550 | if (committed < hdr->args.stable) { |
1551 | /* We tried a write call, but the server did not | |
1552 | * commit data to stable storage even though we | |
1553 | * requested it. | |
1554 | * Note: There is a known bug in Tru64 < 5.0 in which | |
1555 | * the server reports NFS_DATA_SYNC, but performs | |
1556 | * NFS_FILE_SYNC. We therefore implement this checking | |
1557 | * as a dprintk() in order to avoid filling syslog. | |
1558 | */ | |
1559 | static unsigned long complain; | |
1560 | ||
1561 | /* Note this will print the MDS for a DS write */ | |
1562 | if (time_before(complain, jiffies)) { | |
1563 | dprintk("NFS: faulty NFS server %s:" | |
1564 | " (committed = %d) != (stable = %d)\n", | |
1565 | NFS_SERVER(inode)->nfs_client->cl_hostname, | |
1566 | committed, hdr->args.stable); | |
1567 | complain = jiffies + 300 * HZ; | |
1568 | } | |
1da177e4 LT |
1569 | } |
1570 | } | |
1f2edbe3 TM |
1571 | |
1572 | /* Deal with the suid/sgid bit corner case */ | |
16e14375 TM |
1573 | if (nfs_should_remove_suid(inode)) { |
1574 | spin_lock(&inode->i_lock); | |
720869eb | 1575 | nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); |
16e14375 TM |
1576 | spin_unlock(&inode->i_lock); |
1577 | } | |
0eecb214 AS |
1578 | return 0; |
1579 | } | |
1580 | ||
1581 | /* | |
1582 | * This function handles the result of a completed WRITE call, retrying short writes as needed.
1583 | */ | |
d45f60c6 WAA |
1584 | static void nfs_writeback_result(struct rpc_task *task, |
1585 | struct nfs_pgio_header *hdr) | |
0eecb214 | 1586 | { |
d45f60c6 WAA |
1587 | struct nfs_pgio_args *argp = &hdr->args; |
1588 | struct nfs_pgio_res *resp = &hdr->res; | |
1f2edbe3 TM |
1589 | |
1590 | if (resp->count < argp->count) { | |
1da177e4 LT |
1591 | static unsigned long complain; |
1592 | ||
6c75dc0d | 1593 | /* This is a short write! */
d45f60c6 | 1594 | nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE); |
91d5b470 | 1595 | |
1da177e4 | 1596 | /* Has the server at least made some progress? */ |
6c75dc0d FI |
1597 | if (resp->count == 0) { |
1598 | if (time_before(complain, jiffies)) { | |
1599 | printk(KERN_WARNING | |
1600 | "NFS: Server wrote zero bytes, expected %u.\n", | |
1601 | argp->count); | |
1602 | complain = jiffies + 300 * HZ; | |
1da177e4 | 1603 | } |
d45f60c6 | 1604 | nfs_set_pgio_error(hdr, -EIO, argp->offset); |
6c75dc0d | 1605 | task->tk_status = -EIO; |
13602896 | 1606 | return; |
1da177e4 | 1607 | } |
f8417b48 KM |
1608 | |
1609 | /* For non rpc-based layout drivers, retry-through-MDS */ | |
1610 | if (!task->tk_ops) { | |
1611 | hdr->pnfs_error = -EAGAIN; | |
1612 | return; | |
1613 | } | |
1614 | ||
6c75dc0d FI |
1615 | /* Was this an NFSv2 write or an NFSv3 stable write? */ |
1616 | if (resp->verf->committed != NFS_UNSTABLE) { | |
1617 | /* Resend from where the server left off */ | |
d45f60c6 | 1618 | hdr->mds_offset += resp->count; |
6c75dc0d FI |
1619 | argp->offset += resp->count; |
1620 | argp->pgbase += resp->count; | |
1621 | argp->count -= resp->count; | |
1622 | } else { | |
1623 | /* Resend as a stable write in order to avoid | |
1624 | * headaches in the case of a server crash. | |
1625 | */ | |
1626 | argp->stable = NFS_FILE_SYNC; | |
1da177e4 | 1627 | } |
8c9cb714 TM |
1628 | resp->count = 0; |
1629 | resp->verf->committed = 0; | |
6c75dc0d | 1630 | rpc_restart_call_prepare(task); |
1da177e4 | 1631 | } |
1da177e4 LT |
1632 | } |
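/*
 * Illustrative example of the short-write handling above: if a
 * FILE_SYNC write of 16384 bytes comes back with resp->count == 4096,
 * the remaining 12288 bytes are resent from offset + 4096; had the
 * same short write been UNSTABLE, the whole range would be resent as
 * NFS_FILE_SYNC so that a server reboot cannot silently lose the
 * partially-written data.
 */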
1633 | ||
af7cf057 | 1634 | static int wait_on_commit(struct nfs_mds_commit_info *cinfo) |
71d0a611 | 1635 | { |
723c921e PZ |
1636 | return wait_var_event_killable(&cinfo->rpcs_out, |
1637 | !atomic_read(&cinfo->rpcs_out)); | |
af7cf057 | 1638 | } |
b8413f98 | 1639 | |
17f46b80 | 1640 | void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) |
af7cf057 TM |
1641 | { |
1642 | atomic_inc(&cinfo->rpcs_out); | |
71d0a611 TM |
1643 | } |
1644 | ||
133a48ab | 1645 | bool nfs_commit_end(struct nfs_mds_commit_info *cinfo) |
71d0a611 | 1646 | { |
133a48ab | 1647 | if (atomic_dec_and_test(&cinfo->rpcs_out)) { |
723c921e | 1648 | wake_up_var(&cinfo->rpcs_out); |
133a48ab TM |
1649 | return true; |
1650 | } | |
1651 | return false; | |
71d0a611 TM |
1652 | } |
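/*
 * Usage sketch: callers bracket a batch of COMMIT RPCs with
 * nfs_commit_begin()/nfs_commit_end(), and wait_on_commit() sleeps
 * until rpcs_out falls back to zero; __nfs_commit_inode() below is
 * the canonical user of this pattern.
 */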
1653 | ||
0b7c0153 | 1654 | void nfs_commitdata_release(struct nfs_commit_data *data) |
1da177e4 | 1655 | { |
0b7c0153 FI |
1656 | put_nfs_open_context(data->context); |
1657 | nfs_commit_free(data); | |
1da177e4 | 1658 | } |
e0c2b380 | 1659 | EXPORT_SYMBOL_GPL(nfs_commitdata_release); |
1da177e4 | 1660 | |
0b7c0153 | 1661 | int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, |
c36aae9a | 1662 | const struct nfs_rpc_ops *nfs_ops, |
9ace33cd | 1663 | const struct rpc_call_ops *call_ops, |
df24c483 MS |
1664 | int how, int flags, |
1665 | struct nfsd_file *localio) | |
1da177e4 | 1666 | { |
07737691 | 1667 | struct rpc_task *task; |
9ace33cd | 1668 | int priority = flush_task_priority(how); |
bdc7f021 TM |
1669 | struct rpc_message msg = { |
1670 | .rpc_argp = &data->args, | |
1671 | .rpc_resp = &data->res, | |
9ace33cd | 1672 | .rpc_cred = data->cred, |
bdc7f021 | 1673 | }; |
84115e1c | 1674 | struct rpc_task_setup task_setup_data = { |
07737691 | 1675 | .task = &data->task, |
9ace33cd | 1676 | .rpc_client = clnt, |
bdc7f021 | 1677 | .rpc_message = &msg, |
9ace33cd | 1678 | .callback_ops = call_ops, |
84115e1c | 1679 | .callback_data = data, |
101070ca | 1680 | .workqueue = nfsiod_workqueue, |
4fa7ef69 | 1681 | .flags = RPC_TASK_ASYNC | flags, |
3ff7576d | 1682 | .priority = priority, |
84115e1c | 1683 | }; |
118f09ed OK |
1684 | |
1685 | if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE)) | |
1686 | task_setup_data.flags |= RPC_TASK_MOVEABLE; | |
1687 | ||
9ace33cd | 1688 | /* Set up the initial task struct. */ |
e9ae1ee2 | 1689 | nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client); |
8224b273 | 1690 | trace_nfs_initiate_commit(data); |
9ace33cd | 1691 | |
b4839ebe | 1692 | dprintk("NFS: initiated commit call\n"); |
9ace33cd | 1693 | |
70ba381e WAA |
1694 | if (localio) |
1695 | return nfs_local_commit(localio, data, call_ops, how); | |
1696 | ||
9ace33cd FI |
1697 | task = rpc_run_task(&task_setup_data); |
1698 | if (IS_ERR(task)) | |
1699 | return PTR_ERR(task); | |
1700 | if (how & FLUSH_SYNC) | |
1701 | rpc_wait_for_completion_task(task); | |
1702 | rpc_put_task(task); | |
1703 | return 0; | |
1704 | } | |
e0c2b380 | 1705 | EXPORT_SYMBOL_GPL(nfs_initiate_commit); |
9ace33cd | 1706 | |
378520b8 PT |
1707 | static loff_t nfs_get_lwb(struct list_head *head) |
1708 | { | |
1709 | loff_t lwb = 0; | |
1710 | struct nfs_page *req; | |
1711 | ||
1712 | list_for_each_entry(req, head, wb_list) | |
1713 | if (lwb < (req_offset(req) + req->wb_bytes)) | |
1714 | lwb = req_offset(req) + req->wb_bytes; | |
1715 | ||
1716 | return lwb; | |
1717 | } | |
1718 | ||
9ace33cd FI |
1719 | /* |
1720 | * Set up the argument/result storage required for the RPC call. | |
1721 | */ | |
0b7c0153 | 1722 | void nfs_init_commit(struct nfs_commit_data *data, |
f453a54a FI |
1723 | struct list_head *head, |
1724 | struct pnfs_layout_segment *lseg, | |
1725 | struct nfs_commit_info *cinfo) | |
9ace33cd | 1726 | { |
19573c93 TM |
1727 | struct nfs_page *first; |
1728 | struct nfs_open_context *ctx; | |
1729 | struct inode *inode; | |
1da177e4 LT |
1730 | |
1731 | /* Set up the RPC argument and reply structs | |
1732 | * NB: take care not to mess about with data->commit et al. */ | |
1733 | ||
19573c93 TM |
1734 | if (head) |
1735 | list_splice_init(head, &data->pages); | |
1736 | ||
1737 | first = nfs_list_entry(data->pages.next); | |
1738 | ctx = nfs_req_openctx(first); | |
1739 | inode = d_inode(ctx->dentry); | |
1da177e4 | 1740 | |
1da177e4 | 1741 | data->inode = inode; |
9fcd5960 | 1742 | data->cred = ctx->cred; |
988b6dce | 1743 | data->lseg = lseg; /* reference transferred */ |
378520b8 PT |
1744 | /* only set lwb for pnfs commit */ |
1745 | if (lseg) | |
1746 | data->lwb = nfs_get_lwb(&data->pages); | |
9ace33cd | 1747 | data->mds_ops = &nfs_commit_ops; |
f453a54a | 1748 | data->completion_ops = cinfo->completion_ops; |
b359f9d0 | 1749 | data->dreq = cinfo->dreq; |
1da177e4 LT |
1750 | |
1751 | data->args.fh = NFS_FH(data->inode); | |
3da28eb1 TM |
1752 | /* Note: we always request a commit of the entire inode */ |
1753 | data->args.offset = 0; | |
1754 | data->args.count = 0; | |
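	/* (In the NFS COMMIT protocol a count of 0 means "flush from
	 * offset to the end of the file", so offset = 0 with count = 0
	 * covers every byte of the inode; see RFC 1813, COMMIT3args.)
	 */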
9fcd5960 | 1755 | data->context = get_nfs_open_context(ctx); |
1da177e4 LT |
1756 | data->res.fattr = &data->fattr; |
1757 | data->res.verf = &data->verf; | |
0e574af1 | 1758 | nfs_fattr_init(&data->fattr); |
133a48ab | 1759 | nfs_commit_begin(cinfo->mds); |
1da177e4 | 1760 | } |
e0c2b380 | 1761 | EXPORT_SYMBOL_GPL(nfs_init_commit); |
1da177e4 | 1762 | |
e0c2b380 | 1763 | void nfs_retry_commit(struct list_head *page_list, |
ea2cf228 | 1764 | struct pnfs_layout_segment *lseg, |
b57ff130 WAA |
1765 | struct nfs_commit_info *cinfo, |
1766 | u32 ds_commit_idx) | |
64bfeb49 FI |
1767 | { |
1768 | struct nfs_page *req; | |
1769 | ||
1770 | while (!list_empty(page_list)) { | |
1771 | req = nfs_list_entry(page_list->next); | |
1772 | nfs_list_remove_request(req); | |
b57ff130 | 1773 | nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); |
0c493b5c | 1774 | nfs_folio_clear_commit(nfs_page_to_folio(req)); |
1d1afcbc | 1775 | nfs_unlock_and_release_request(req); |
64bfeb49 FI |
1776 | } |
1777 | } | |
e0c2b380 | 1778 | EXPORT_SYMBOL_GPL(nfs_retry_commit); |
64bfeb49 | 1779 | |
0c493b5c TM |
1780 | static void nfs_commit_resched_write(struct nfs_commit_info *cinfo, |
1781 | struct nfs_page *req) | |
b20135d0 | 1782 | { |
0c493b5c TM |
1783 | struct folio *folio = nfs_page_to_folio(req); |
1784 | ||
1785 | filemap_dirty_folio(folio_mapping(folio), folio); | |
b20135d0 TM |
1786 | } |
1787 | ||
1da177e4 LT |
1788 | /* |
1789 | * Commit dirty pages | |
1790 | */ | |
1791 | static int | |
ea2cf228 FI |
1792 | nfs_commit_list(struct inode *inode, struct list_head *head, int how, |
1793 | struct nfs_commit_info *cinfo) | |
1da177e4 | 1794 | { |
0b7c0153 | 1795 | struct nfs_commit_data *data; |
fa88a7d6 | 1796 | struct nfsd_file *localio; |
85e39fee | 1797 | unsigned short task_flags = 0; |
1da177e4 | 1798 | |
ade8febd WAA |
1799 | /* another commit raced with us */ |
1800 | if (list_empty(head)) | |
1801 | return 0; | |
1802 | ||
515dcdcd TM |
1803 | data = nfs_commitdata_alloc(); |
1804 | if (!data) { | |
1805 | nfs_retry_commit(head, NULL, cinfo, -1); | |
1806 | return -ENOMEM; | |
1807 | } | |
1da177e4 LT |
1808 | |
1809 | /* Set up the argument struct */ | |
f453a54a | 1810 | nfs_init_commit(data, head, NULL, cinfo); |
85e39fee OK |
1811 | if (NFS_SERVER(inode)->nfs_client->cl_minorversion) |
1812 | task_flags = RPC_TASK_MOVEABLE; | |
fa88a7d6 TM |
1813 | |
1814 | localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred, | |
86e00412 MS |
1815 | data->args.fh, &data->context->nfl, |
1816 | data->context->mode); | |
c36aae9a | 1817 | return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), |
85e39fee | 1818 | data->mds_ops, how, |
fa88a7d6 | 1819 | RPC_TASK_CRED_NOREF | task_flags, localio); |
67911c8f | 1820 | } |
67911c8f | 1821 | |
1da177e4 LT |
1822 | /* |
1823 | * COMMIT call returned | |
1824 | */ | |
788e7a89 | 1825 | static void nfs_commit_done(struct rpc_task *task, void *calldata) |
1da177e4 | 1826 | { |
0b7c0153 | 1827 | struct nfs_commit_data *data = calldata; |
1da177e4 | 1828 | |
788e7a89 | 1829 | /* Call the NFS version-specific code */ |
c0d0e96b | 1830 | NFS_PROTO(data->inode)->commit_done(task, data); |
7bdd297e | 1831 | trace_nfs_commit_done(task, data); |
c9d8f89d TM |
1832 | } |
1833 | ||
f453a54a | 1834 | static void nfs_commit_release_pages(struct nfs_commit_data *data) |
c9d8f89d | 1835 | { |
221203ce | 1836 | const struct nfs_writeverf *verf = data->res.verf; |
5917ce84 | 1837 | struct nfs_page *req; |
c9d8f89d | 1838 | int status = data->task.tk_status; |
f453a54a | 1839 | struct nfs_commit_info cinfo; |
0c493b5c | 1840 | struct folio *folio; |
788e7a89 | 1841 | |
1da177e4 LT |
1842 | while (!list_empty(&data->pages)) { |
1843 | req = nfs_list_entry(data->pages.next); | |
1844 | nfs_list_remove_request(req); | |
0c493b5c TM |
1845 | folio = nfs_page_to_folio(req); |
1846 | nfs_folio_clear_commit(folio); | |
1da177e4 | 1847 | |
1e8968c5 | 1848 | dprintk("NFS: commit (%s/%llu %d@%lld)", |
9fcd5960 TM |
1849 | nfs_req_openctx(req)->dentry->d_sb->s_id, |
1850 | (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)), | |
1da177e4 LT |
1851 | req->wb_bytes, |
1852 | (long long)req_offset(req)); | |
c9d8f89d | 1853 | if (status < 0) { |
0c493b5c | 1854 | if (folio) { |
af887e43 TM |
1855 | trace_nfs_commit_error(data->inode, req, |
1856 | status); | |
0c493b5c | 1857 | nfs_mapping_set_error(folio, status); |
38a33101 | 1858 | nfs_inode_remove_request(req); |
6fbda89b | 1859 | } |
ddeaa637 | 1860 | dprintk_cont(", error = %d\n", status); |
1da177e4 LT |
1861 | goto next; |
1862 | } | |
1863 | ||
1864 | /* Okay, COMMIT succeeded, apparently. Check the verifier | |
1865 | * returned by the server against all stored verfs. */ | |
1f28476d | 1866 | if (nfs_write_match_verf(verf, req)) { |
1da177e4 | 1867 | /* We have a match */ |
0c493b5c | 1868 | if (folio) |
38a33101 | 1869 | nfs_inode_remove_request(req); |
ddeaa637 | 1870 | dprintk_cont(" OK\n"); |
1da177e4 LT |
1871 | goto next; |
1872 | } | |
1873 | /* We have a mismatch. Write the page again */ | |
ddeaa637 | 1874 | dprintk_cont(" mismatch\n"); |
6d884e8f | 1875 | nfs_mark_request_dirty(req); |
67f4b5dc | 1876 | atomic_long_inc(&NFS_I(data->inode)->redirtied_pages); |
1da177e4 | 1877 | next: |
1d1afcbc | 1878 | nfs_unlock_and_release_request(req); |
7f1bda44 TM |
1879 | /* Latency breaker */ |
1880 | cond_resched(); | |
1da177e4 | 1881 | } |
353db796 | 1882 | |
f453a54a | 1883 | nfs_init_cinfo(&cinfo, data->inode, data->dreq); |
af7cf057 | 1884 | nfs_commit_end(cinfo.mds); |
5917ce84 FI |
1885 | } |
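/*
 * Note on the verifier check above: the write verifier changes when
 * the server reboots, so a mismatch between a request's stored
 * verifier and the one this COMMIT returned means previously
 * acknowledged unstable data may have been lost, and the page must be
 * redirtied and written again.
 */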
1886 | ||
1887 | static void nfs_commit_release(void *calldata) | |
1888 | { | |
0b7c0153 | 1889 | struct nfs_commit_data *data = calldata; |
5917ce84 | 1890 | |
f453a54a | 1891 | data->completion_ops->completion(data); |
c9d8f89d | 1892 | nfs_commitdata_release(calldata); |
1da177e4 | 1893 | } |
788e7a89 TM |
1894 | |
1895 | static const struct rpc_call_ops nfs_commit_ops = { | |
0b7c0153 | 1896 | .rpc_call_prepare = nfs_commit_prepare, |
788e7a89 TM |
1897 | .rpc_call_done = nfs_commit_done, |
1898 | .rpc_release = nfs_commit_release, | |
1899 | }; | |
1da177e4 | 1900 | |
f453a54a FI |
1901 | static const struct nfs_commit_completion_ops nfs_commit_completion_ops = { |
1902 | .completion = nfs_commit_release_pages, | |
b20135d0 | 1903 | .resched_write = nfs_commit_resched_write, |
f453a54a FI |
1904 | }; |
1905 | ||
1763da12 FI |
1906 | int nfs_generic_commit_list(struct inode *inode, struct list_head *head, |
1907 | int how, struct nfs_commit_info *cinfo) | |
84c53ab5 FI |
1908 | { |
1909 | int status; | |
1910 | ||
ea2cf228 | 1911 | status = pnfs_commit_list(inode, head, how, cinfo); |
84c53ab5 | 1912 | if (status == PNFS_NOT_ATTEMPTED) |
ea2cf228 | 1913 | status = nfs_commit_list(inode, head, how, cinfo); |
84c53ab5 FI |
1914 | return status; |
1915 | } | |
1916 | ||
c4f24df9 TM |
1917 | static int __nfs_commit_inode(struct inode *inode, int how, |
1918 | struct writeback_control *wbc) | |
1da177e4 | 1919 | { |
1da177e4 | 1920 | LIST_HEAD(head); |
ea2cf228 | 1921 | struct nfs_commit_info cinfo; |
71d0a611 | 1922 | int may_wait = how & FLUSH_SYNC; |
c4f24df9 | 1923 | int ret, nscan; |
1da177e4 | 1924 | |
64a93dbf | 1925 | how &= ~FLUSH_SYNC; |
ea2cf228 | 1926 | nfs_init_cinfo_from_inode(&cinfo, inode); |
af7cf057 | 1927 | nfs_commit_begin(cinfo.mds); |
c4f24df9 TM |
1928 | for (;;) { |
1929 | ret = nscan = nfs_scan_commit(inode, &head, &cinfo); | |
1930 | if (ret <= 0) | |
1931 | break; | |
1932 | ret = nfs_generic_commit_list(inode, &head, how, &cinfo); | |
1933 | if (ret < 0) | |
1934 | break; | |
1935 | ret = 0; | |
1936 | if (wbc && wbc->sync_mode == WB_SYNC_NONE) { | |
1937 | if (nscan < wbc->nr_to_write) | |
1938 | wbc->nr_to_write -= nscan; | |
1939 | else | |
1940 | wbc->nr_to_write = 0; | |
1941 | } | |
1942 | if (nscan < INT_MAX) | |
1943 | break; | |
1944 | cond_resched(); | |
1945 | } | |
af7cf057 | 1946 | nfs_commit_end(cinfo.mds); |
c4f24df9 TM |
1947 | if (ret || !may_wait) |
1948 | return ret; | |
1949 | return wait_on_commit(cinfo.mds); | |
1950 | } | |
1951 | ||
1952 | int nfs_commit_inode(struct inode *inode, int how) | |
1953 | { | |
1954 | return __nfs_commit_inode(inode, how, NULL); | |
1da177e4 | 1955 | } |
b20135d0 | 1956 | EXPORT_SYMBOL_GPL(nfs_commit_inode); |
8fc795f7 | 1957 | |
ae09c31f | 1958 | int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) |
8fc795f7 | 1959 | { |
420e3646 TM |
1960 | struct nfs_inode *nfsi = NFS_I(inode); |
1961 | int flags = FLUSH_SYNC; | |
1962 | int ret = 0; | |
8fc795f7 | 1963 | |
a00dd6c0 | 1964 | if (wbc->sync_mode == WB_SYNC_NONE) { |
c4f24df9 TM |
1965 | /* no commits means nothing needs to be done */ |
1966 | if (!atomic_long_read(&nfsi->commit_info.ncommit)) | |
1967 | goto check_requests_outstanding; | |
1968 | ||
a00dd6c0 JL |
1969 | /* Don't commit yet if this is a non-blocking flush and there |
1970 | * are a lot of outstanding writes for this mapping. | |
1971 | */ | |
1a4edf0f | 1972 | if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) |
a00dd6c0 | 1973 | goto out_mark_dirty; |
420e3646 | 1974 | |
a00dd6c0 | 1975 | /* don't wait for the COMMIT response */ |
420e3646 | 1976 | flags = 0; |
a00dd6c0 JL |
1977 | } |
1978 | ||
c4f24df9 TM |
1979 | ret = __nfs_commit_inode(inode, flags, wbc); |
1980 | if (!ret) { | |
1981 | if (flags & FLUSH_SYNC) | |
1982 | return 0; | |
1983 | } else if (atomic_long_read(&nfsi->commit_info.ncommit)) | |
1984 | goto out_mark_dirty; | |
1985 | ||
1986 | check_requests_outstanding: | |
1987 | if (!atomic_read(&nfsi->commit_info.rpcs_out)) | |
1988 | return ret; | |
420e3646 | 1989 | out_mark_dirty: |
8fc795f7 TM |
1990 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
1991 | return ret; | |
1992 | } | |
89d77c8f | 1993 | EXPORT_SYMBOL_GPL(nfs_write_inode); |
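/*
 * Behaviour sketch for nfs_write_inode() above: a WB_SYNC_NONE flush
 * sends the COMMIT without waiting for the reply (flags == 0) and
 * skips it entirely while pages are still tagged for writeback; the
 * inode is re-marked I_DIRTY_DATASYNC whenever commitable pages or
 * COMMIT RPCs remain outstanding, so a later sync will try again.
 */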
a8d8f02c | 1994 | |
837bb1d7 TM |
1995 | /* |
1996 | * Wrapper for filemap_write_and_wait_range() | |
1997 | * | |
1998 | * Needed for pNFS in order to ensure data becomes visible to the | |
1999 | * client. | |
2000 | */ | |
2001 | int nfs_filemap_write_and_wait_range(struct address_space *mapping, | |
2002 | loff_t lstart, loff_t lend) | |
2003 | { | |
2004 | int ret; | |
2005 | ||
2006 | ret = filemap_write_and_wait_range(mapping, lstart, lend); | |
2007 | if (ret == 0) | |
2008 | ret = pnfs_sync_inode(mapping->host, true); | |
2009 | return ret; | |
2010 | } | |
2011 | EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range); | |
2012 | ||
acdc53b2 TM |
2013 | /* |
2014 | * Flush the inode to disk: write back all dirty pages and commit them.
2015 | */ | |
2016 | int nfs_wb_all(struct inode *inode) | |
34901f70 | 2017 | { |
f4ce1299 TM |
2018 | int ret; |
2019 | ||
2020 | trace_nfs_writeback_inode_enter(inode); | |
2021 | ||
5bb89b47 | 2022 | ret = filemap_write_and_wait(inode->i_mapping); |
6b196875 CL |
2023 | if (ret) |
2024 | goto out; | |
2025 | ret = nfs_commit_inode(inode, FLUSH_SYNC); | |
2026 | if (ret < 0) | |
2027 | goto out; | |
2028 | pnfs_sync_inode(inode, true); | |
2029 | ret = 0; | |
34901f70 | 2030 | |
6b196875 | 2031 | out: |
f4ce1299 TM |
2032 | trace_nfs_writeback_inode_exit(inode, ret); |
2033 | return ret; | |
1c75950b | 2034 | } |
ddda8e0a | 2035 | EXPORT_SYMBOL_GPL(nfs_wb_all); |
1c75950b | 2036 | |
6d740c76 | 2037 | int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio) |
1b3b4a1a TM |
2038 | { |
2039 | struct nfs_page *req; | |
1b3b4a1a TM |
2040 | int ret = 0; |
2041 | ||
6d740c76 | 2042 | folio_wait_writeback(folio); |
3e217045 WAA |
2043 | |
2044 | /* blocking call to cancel all requests and join to a single (head) | |
2045 | * request */ | |
0c493b5c | 2046 | req = nfs_lock_and_join_requests(folio); |
3e217045 WAA |
2047 | |
2048 | if (IS_ERR(req)) { | |
2049 | ret = PTR_ERR(req); | |
2050 | } else if (req) { | |
6d740c76 | 2051 | /* all requests from this folio have been cancelled by |
3e217045 WAA |
2052 | * nfs_lock_and_join_requests, so just remove the head |
2053 | * request from the inode / page_private pointer and | |
2054 | * release it */ | |
2055 | nfs_inode_remove_request(req); | |
3e217045 | 2056 | nfs_unlock_and_release_request(req); |
1b3b4a1a | 2057 | } |
3e217045 | 2058 | |
1b3b4a1a TM |
2059 | return ret; |
2060 | } | |
2061 | ||
5241060e TM |
2062 | /** |
2063 | * nfs_wb_folio - Write back all requests on one folio
2064 | * @inode: pointer to inode
2065 | * @folio: pointer to folio | |
2066 | * | |
2067 | * Assumes that the folio has been locked by the caller, and will | |
2068 | * not unlock it. | |
7f2f12d9 | 2069 | */ |
5241060e | 2070 | int nfs_wb_folio(struct inode *inode, struct folio *folio) |
1c75950b | 2071 | { |
fada32ed CH |
2072 | loff_t range_start = folio_pos(folio); |
2073 | size_t len = folio_size(folio); | |
4d770ccf | 2074 | struct writeback_control wbc = { |
4d770ccf | 2075 | .sync_mode = WB_SYNC_ALL, |
7f2f12d9 | 2076 | .nr_to_write = 0, |
4d770ccf | 2077 | .range_start = range_start, |
fada32ed | 2078 | .range_end = range_start + len - 1, |
4d770ccf TM |
2079 | }; |
2080 | int ret; | |
1c75950b | 2081 | |
fada32ed | 2082 | trace_nfs_writeback_folio(inode, range_start, len); |
f4ce1299 | 2083 | |
0522f6ad | 2084 | for (;;) { |
5241060e TM |
2085 | folio_wait_writeback(folio); |
2086 | if (folio_clear_dirty_for_io(folio)) { | |
0c493b5c | 2087 | ret = nfs_writepage_locked(folio, &wbc); |
73e3302f TM |
2088 | if (ret < 0) |
2089 | goto out_error; | |
0522f6ad | 2090 | continue; |
7f2f12d9 | 2091 | } |
f4ce1299 | 2092 | ret = 0; |
5241060e | 2093 | if (!folio_test_private(folio)) |
0522f6ad TM |
2094 | break; |
2095 | ret = nfs_commit_inode(inode, FLUSH_SYNC); | |
ba8b06e6 | 2096 | if (ret < 0) |
73e3302f | 2097 | goto out_error; |
7f2f12d9 | 2098 | } |
73e3302f | 2099 | out_error: |
fada32ed | 2100 | trace_nfs_writeback_folio_done(inode, range_start, len, ret); |
4d770ccf | 2101 | return ret; |
1c75950b TM |
2102 | } |
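/*
 * Loop sketch for nfs_wb_folio() above: each pass waits for writeback,
 * rewrites the folio if it is dirty, and otherwise issues a
 * synchronous commit while the folio still carries an nfs_page in its
 * private state; the loop exits once the folio is clean with no
 * request attached.
 */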
2103 | ||
074cc1de | 2104 | #ifdef CONFIG_MIGRATION |
4ae84a80 MWO |
2105 | int nfs_migrate_folio(struct address_space *mapping, struct folio *dst, |
2106 | struct folio *src, enum migrate_mode mode) | |
074cc1de | 2107 | { |
2da95652 | 2108 | /* |
4ae84a80 | 2109 | * If the private flag is set, the folio is currently associated with |
2da95652 JL |
2110 | * an in-progress read or write request. Don't try to migrate it. |
2111 | * | |
2112 | * FIXME: we could do this in principle, but we'll need a way to ensure | |
2113 | * that we can safely release the inode reference while holding | |
4ae84a80 | 2114 | * the folio lock. |
2da95652 | 2115 | */ |
4ae84a80 | 2116 | if (folio_test_private(src)) |
2da95652 | 2117 | return -EBUSY; |
074cc1de | 2118 | |
2e9d7e4b | 2119 | if (folio_test_private_2(src)) { /* [DEPRECATED] */ |
16f2f4e6 DH |
2120 | if (mode == MIGRATE_ASYNC) |
2121 | return -EBUSY; | |
2e9d7e4b | 2122 | folio_wait_private_2(src); |
16f2f4e6 | 2123 | } |
074cc1de | 2124 | |
54184650 | 2125 | return migrate_folio(mapping, dst, src, mode); |
074cc1de TM |
2126 | } |
2127 | #endif | |
2128 | ||
f7b422b1 | 2129 | int __init nfs_init_writepagecache(void) |
1da177e4 LT |
2130 | { |
2131 | nfs_wdata_cachep = kmem_cache_create("nfs_write_data", | |
1e7f3a48 | 2132 | sizeof(struct nfs_pgio_header), |
1da177e4 | 2133 | 0, SLAB_HWCACHE_ALIGN, |
20c2df83 | 2134 | NULL); |
1da177e4 LT |
2135 | if (nfs_wdata_cachep == NULL) |
2136 | return -ENOMEM; | |
2137 | ||
93d2341c MD |
2138 | nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, |
2139 | nfs_wdata_cachep); | |
1da177e4 | 2140 | if (nfs_wdata_mempool == NULL) |
3dd4765f | 2141 | goto out_destroy_write_cache; |
1da177e4 | 2142 | |
0b7c0153 FI |
2143 | nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", |
2144 | sizeof(struct nfs_commit_data), | |
2145 | 0, SLAB_HWCACHE_ALIGN, | |
2146 | NULL); | |
2147 | if (nfs_cdata_cachep == NULL) | |
3dd4765f | 2148 | goto out_destroy_write_mempool; |
0b7c0153 | 2149 | |
93d2341c | 2150 | nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, |
4c100210 | 2151 | nfs_cdata_cachep); |
1da177e4 | 2152 | if (nfs_commit_mempool == NULL) |
3dd4765f | 2153 | goto out_destroy_commit_cache; |
1da177e4 | 2154 | |
89a09141 PZ |
2155 | /* |
2156 | * NFS congestion size, scale with available memory. | |
2157 | * | |
2158 | * 64MB: 8192k | |
2159 | * 128MB: 11585k | |
2160 | * 256MB: 16384k | |
2161 | * 512MB: 23170k | |
2162 | * 1GB: 32768k | |
2163 | * 2GB: 46340k | |
2164 | * 4GB: 65536k | |
2165 | * 8GB: 92681k | |
2166 | * 16GB: 131072k | |
2167 | * | |
2168 | * This allows larger machines to have larger/more transfers. | |
2169 | * Limit the default to 256M | |
2170 | */ | |
ca79b0c2 | 2171 | nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); |
89a09141 PZ |
2172 | if (nfs_congestion_kb > 256*1024) |
2173 | nfs_congestion_kb = 256*1024; | |
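	/*
	 * Worked example for the table above (assuming 4 KiB pages): a
	 * 4 GB machine has totalram_pages() == 1048576, int_sqrt() of
	 * which is 1024, so (16 * 1024) << (PAGE_SHIFT - 10) == 65536k,
	 * matching the "4GB: 65536k" row; the 256*1024 clamp only takes
	 * effect above roughly 64 GB of RAM.
	 */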
2174 | ||
1da177e4 | 2175 | return 0; |
3dd4765f JL |
2176 | |
2177 | out_destroy_commit_cache: | |
2178 | kmem_cache_destroy(nfs_cdata_cachep); | |
2179 | out_destroy_write_mempool: | |
2180 | mempool_destroy(nfs_wdata_mempool); | |
2181 | out_destroy_write_cache: | |
2182 | kmem_cache_destroy(nfs_wdata_cachep); | |
2183 | return -ENOMEM; | |
1da177e4 LT |
2184 | } |
2185 | ||
266bee88 | 2186 | void nfs_destroy_writepagecache(void) |
1da177e4 LT |
2187 | { |
2188 | mempool_destroy(nfs_commit_mempool); | |
3dd4765f | 2189 | kmem_cache_destroy(nfs_cdata_cachep); |
1da177e4 | 2190 | mempool_destroy(nfs_wdata_mempool); |
1a1d92c1 | 2191 | kmem_cache_destroy(nfs_wdata_cachep); |
1da177e4 LT |
2192 | } |
2193 | ||
4a0de55c AS |
2194 | static const struct nfs_rw_ops nfs_rw_write_ops = { |
2195 | .rw_alloc_header = nfs_writehdr_alloc, | |
2196 | .rw_free_header = nfs_writehdr_free, | |
0eecb214 AS |
2197 | .rw_done = nfs_writeback_done, |
2198 | .rw_result = nfs_writeback_result, | |
1ed26f33 | 2199 | .rw_initiate = nfs_initiate_write, |
4a0de55c | 2200 | }; |