NFS: Another cleanup of the read/write request coalescing code
[linux-2.6-block.git] fs/nfs/write.c
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}

struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
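
/*
 * A worked example of the sizing above, assuming 4k pages: len == 20000
 * gives pagecount = (20000 + 4095) >> 12 = 5. Requests of up to
 * ARRAY_SIZE(p->page_array) pages reuse the inline array embedded in
 * struct nfs_write_data; only larger requests pay for the separate
 * kcalloc(), which is why the failure path must free the mempool object.
 */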

static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			atomic_inc(&req->wb_count);
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct nfs_page *req = NULL;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(req_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset + count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	if (count != PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
	SetPageUptodate(page);
}

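/*
 * Example for nfs_mark_uptodate(), assuming PAGE_CACHE_SIZE == 4096:
 * a write of 4096 bytes at base 0 on a page whose valid length is the
 * full page sets PG_uptodate outright. On the short last page of a
 * file with nfs_page_length(page) == 1000, a 1000-byte write at base 0
 * also qualifies, and the remaining 3096 bytes are zeroed first so
 * later reads of the tail see zeroes rather than stale data.
 */
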
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

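/*
 * Threshold arithmetic, assuming PAGE_SHIFT == 12: the thresholds are
 * counted in pages, so nfs_congestion_kb == 16384 yields
 * NFS_CONGESTION_ON_THRESH = 16384 >> 2 = 4096 pages and
 * NFS_CONGESTION_OFF_THRESH = 4096 - 4096/4 = 3072 pages. The 25% gap
 * is deliberate hysteresis: the bdi is marked congested above the ON
 * threshold but only uncongested once outstanding writeback drops
 * below 75% of it, so the flag does not flap at the boundary.
 */
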
static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		if (atomic_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
	return ret;
}

static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
		congestion_end(WRITE);
	}
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty. Returns 0 if the request
 * was not tagged.
 * May also return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_mark_flush(struct page *page)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
	spinlock_t *req_lock = &nfsi->req_lock;
	int ret;

	spin_lock(req_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(req_lock);
			return 1;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 * then the call to nfs_lock_request_dontget() will always
		 * succeed provided that someone hasn't already marked the
		 * request as dirty (in which case we don't care).
		 */
		spin_unlock(req_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(req_lock);
	}
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
		spin_unlock(req_lock);
		nfs_unlock_request(req);
		return 1;
	}
	if (nfs_set_page_writeback(page) == 0) {
		nfs_list_remove_request(req);
		/* add the request to the inode's dirty list. */
		radix_tree_tag_set(&nfsi->nfs_page_tree,
				req->wb_index, NFS_PAGE_TAG_DIRTY);
		nfs_list_add_request(req, &nfsi->dirty);
		nfsi->ndirty++;
		spin_unlock(req_lock);
		__mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES);
	} else
		spin_unlock(req_lock);
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfs_unlock_request(req);
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	err = nfs_page_mark_flush(page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_mark_flush(page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc));
	return err;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return err;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	err = 0;
out:
	return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	if (PageDirty(req->wb_page))
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
		__set_page_dirty_nobuffers(req->wb_page);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

static void
nfs_redirty_request(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
		return 0;
	return !PageWriteback(req->wb_page);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_redirty_request(req);
		return 1;
	}
	return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif

/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}

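/*
 * Region-merge example for nfs_update_request(): with an existing
 * request covering bytes [0, 2048) of the page, a new write of
 * [1024, 4096) overlaps it, so the request is simply extended in place
 * to [0, 4096). A disjoint write, say [3072, 3584) against an existing
 * [0, 512), fails the "offset > rqend" test and returns -EBUSY so the
 * caller flushes the old request before retrying.
 */
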
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		nfs_set_pageerror(page);
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else
		nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_writeback(req);
}

static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				wsize, offset, how);
		offset += wsize;
		nbytes -= wsize;
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}

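/*
 * Split example for nfs_flush_multi(), which only runs when wsize is
 * smaller than the page size: with wsize == 8192 and a 20480-byte
 * dirty region on one (large) page, the loop above queues three
 * sub-requests of 8192, 8192 and 4096 bytes. req->wb_complete counts
 * the outstanding RPCs, and nfs_writepage_release() is only called
 * once the last of the three completes.
 */
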
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, size_t count, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;

	data = nfs_writedata_alloc(count);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	struct nfs_pageio_descriptor desc;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;

	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&desc, inode, nfs_flush_multi, wsize, how);
	else
		nfs_pageio_init(&desc, inode, nfs_flush_one, wsize, how);
	nfs_pageio_add_list(&desc, head);
	nfs_pageio_complete(&desc);
	if (desc.pg_error == 0)
		return 0;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_writeback(req);
	}
	return desc.pg_error;
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		nfs_set_pageerror(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

		spin_lock(req_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing; we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(req_lock);
	} else
		dprintk(" OK\n");

out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 * writebacks since the page->count is kept > 1 for as long
 * as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			nfs_set_pageerror(page);
			req->wb_context->error = task->tk_status;
			dprintk(", error = %d\n", task->tk_status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			nfs_end_page_writeback(page);
			dprintk(" marked for commit\n");
			goto next;
		}
		dprintk(" OK\n");
remove_request:
		nfs_end_page_writeback(page);
		nfs_inode_remove_request(req);
next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};


/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 * the server reports NFS_DATA_SYNC, but performs
		 * NFS_FILE_SYNC. We therefore implement this checking
		 * as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
			       argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}

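/*
 * Short-write example for nfs_writeback_done(): if argp->count was
 * 16384 but the server reports resp->count == 4096, the argument
 * struct is advanced (offset += 4096, pgbase += 4096, count becomes
 * 12288) and the call is restarted from where the server left off.
 * A short UNSTABLE reply is instead resent as NFS_FILE_SYNC, so a
 * server crash between the partial writes cannot lose data that was
 * never committed.
 */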

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
next:
		nfs_clear_page_writeback(req);
	}
}
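
/*
 * The verifier comparison above is what makes COMMIT safe: the verf
 * stored in req->wb_verf when the unstable WRITE completed must match
 * the verf returned by COMMIT. A server generates a fresh verifier on
 * reboot, so a mismatch means the write may have been lost from its
 * unstable cache; the page is redirtied and written again rather than
 * being completed.
 */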

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	LIST_HEAD(head);
	long res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(mapping, wbc, &head);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(mapping->host, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif

long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	unsigned long idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			unsigned long l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(unsigned long)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		wbc->pages_skipped = 0;
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(mapping, wbc, &head);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE) {
				nfs_cancel_dirty_list(&head);
				ret = pages;
			} else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (wbc->pages_skipped != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0) {
			if (wbc->pages_skipped != 0)
				continue;
			break;
		}
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = generic_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}

int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
		.for_writepages = 1,
	};
	int ret;

	if (!(how & FLUSH_NOWRITEPAGE)) {
		ret = generic_writepages(mapping, &wbc);
		if (ret < 0)
			goto out;
	}
	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}

int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

int nfs_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	spinlock_t *req_lock;
	struct nfs_page *req;
	int ret;

	if (!mapping)
		goto out_raced;
	inode = mapping->host;
	if (!inode)
		goto out_raced;
	req_lock = &NFS_I(inode)->req_lock;
	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
		spin_unlock(req_lock);
		nfs_release_request(req);
		return ret;
	}
	ret = __set_page_dirty_nobuffers(page);
	spin_unlock(req_lock);
	return ret;
out_raced:
	return !TestSetPageDirty(page);
}


int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}