/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/writeback.h>

#define NFS_PARANOIA 1

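/* Slab cache from which every struct nfs_page request is allocated. */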
static struct kmem_cache *nfs_page_cachep;

static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p;
        p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->wb_list);
        }
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page, and avoids
 * a possible deadlock when we reach the hard limit on the number
 * of dirty pages.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_page *req;

        /* Deal with hard limits. */
        for (;;) {
                /* try to allocate the request struct */
                req = nfs_page_alloc();
                if (req != NULL)
                        break;

                /* Try to free up at least one request in order to stay
                 * below the hard limit
                 */
                if (signalled() && (server->flags & NFS_MOUNT_INTR))
                        return ERR_PTR(-ERESTARTSYS);
                yield();
        }

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page = page;
        atomic_set(&req->wb_complete, 0);
        req->wb_index = page->index;
        page_cache_get(page);
        BUG_ON(PagePrivate(page));
        BUG_ON(!PageLocked(page));
        BUG_ON(page->mapping->host != inode);
        req->wb_offset = offset;
        req->wb_pgbase = offset;
        req->wb_bytes = count;
        atomic_set(&req->wb_count, 1);
        req->wb_context = get_nfs_open_context(ctx);

        return req;
}

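/*
 * Usage sketch (hypothetical caller, not part of this file): a write
 * path would typically do
 *
 *      req = nfs_create_request(ctx, inode, page, offset, count);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      ...queue the request for transmission...
 *      nfs_release_request(req);
 */
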
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
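        /* Order the clearing of PG_BUSY against the wake-up, so that a
         * sleeper in nfs_wait_on_request() cannot miss the bit change. */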
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
        nfs_release_request(req);
}

/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: pointer to request
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

        if (!nfs_lock_request(req))
                return 0;
        radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
        return 1;
}

/**
 * nfs_clear_page_writeback - Unlock request and wake up sleepers
 * @req: pointer to request
 */
void nfs_clear_page_writeback(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

        if (req->wb_page != NULL) {
                spin_lock(&nfsi->req_lock);
                radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
                spin_unlock(&nfsi->req_lock);
        }
        nfs_unlock_request(req);
}

/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
        if (!atomic_dec_and_test(&req->wb_count))
                return;

#ifdef NFS_PARANOIA
        BUG_ON(!list_empty(&req->wb_list));
        BUG_ON(NFS_WBACK_BUSY(req));
#endif

        /* Release struct file or cached credential */
        nfs_clear_request(req);
        put_nfs_open_context(req->wb_context);
        nfs_page_free(req);
}

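/*
 * Action routine for out_of_line_wait_on_bit(): sleep until woken,
 * unless a signal is already pending, in which case bail out with
 * -ERESTARTSYS.
 */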
static int nfs_wait_bit_interruptible(void *word)
{
        int ret = 0;

        if (signal_pending(current))
                ret = -ERESTARTSYS;
        else
                schedule();
        return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
        sigset_t oldmask;
        int ret = 0;

        if (!test_bit(PG_BUSY, &req->wb_flags))
                goto out;
        /*
         * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
         * are not interrupted if the intr flag is not set.
         */
        rpc_clnt_sigmask(clnt, &oldmask);
        ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
        rpc_clnt_sigunmask(clnt, &oldmask);
out:
        return ret;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @bsize: io block size
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc, unsigned int bsize)
{
        INIT_LIST_HEAD(&desc->pg_list);
        desc->pg_count = 0;
        desc->pg_bsize = bsize;
        desc->pg_base = 0;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static int nfs_can_coalesce_requests(struct nfs_page *prev,
                                     struct nfs_page *req)
{
        if (req->wb_context->cred != prev->wb_context->cred)
                return 0;
        if (req->wb_context->lockowner != prev->wb_context->lockowner)
                return 0;
        if (req->wb_context->state != prev->wb_context->state)
                return 0;
        if (req->wb_index != (prev->wb_index + 1))
                return 0;
        if (req->wb_pgbase != 0)
                return 0;
        if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
                return 0;
        return 1;
}

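/*
 * Worked example (illustrative, assuming 4K pages): a request covering
 * all of page index 5 (wb_pgbase 0, wb_bytes 4096) coalesces with one
 * starting at offset 0 of page index 6.  A gap in the indices, a
 * partially-covered previous page, or differing credentials, open state
 * or lockowners ends the run.
 */
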
/**
 * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                                  struct nfs_page *req)
{
        size_t newlen = req->wb_bytes;

        if (desc->pg_count != 0) {
                struct nfs_page *prev;

                /*
                 * FIXME: ideally we should be able to coalesce all requests
                 * that are not block boundary aligned, but currently this
                 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
                 * since nfs_flush_multi and nfs_pagein_multi assume you
                 * can have only one struct nfs_page.
                 */
                newlen += desc->pg_count;
                if (desc->pg_base + newlen > desc->pg_bsize)
                        return 0;
                prev = nfs_list_entry(desc->pg_list.prev);
                if (!nfs_can_coalesce_requests(prev, req))
                        return 0;
        } else
                desc->pg_base = req->wb_pgbase;
        nfs_list_remove_request(req);
        nfs_list_add_request(req, &desc->pg_list);
        desc->pg_count = newlen;
        return 1;
}

/**
 * nfs_pageio_add_list - Split coalesced requests out from a list.
 * @desc: destination io descriptor
 * @head: source list
 *
 * Moves requests from @head into the descriptor's page list until a
 * request fails to coalesce.  The elements are checked to ensure that
 * they form a contiguous set of pages, and that the RPC credentials
 * are the same.
 */
void nfs_pageio_add_list(struct nfs_pageio_descriptor *desc,
                         struct list_head *head)
{
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);
                if (!nfs_pageio_add_request(desc, req))
                        break;
        }
}

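/*
 * Usage sketch (hypothetical, for illustration only):
 *
 *      struct nfs_pageio_descriptor desc;
 *      LIST_HEAD(head);
 *
 *      ...collect locked nfs_page requests on 'head'...
 *      nfs_pageio_init(&desc, NFS_SERVER(inode)->wsize);
 *      nfs_pageio_add_list(&desc, &head);
 *      ...issue a single RPC covering desc.pg_count bytes of desc.pg_list...
 */
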
#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_dirty - Scan the radix tree for dirty requests
 * @mapping: pointer to address space
 * @wbc: writeback_control structure
 * @dst: Destination list
 *
 * Moves dirty requests from the inode's dirty list onto @dst.  The range
 * scanned is taken from @wbc: the whole file if range_cyclic is set,
 * otherwise the byte range from wbc->range_start to wbc->range_end
 * (a range_end of 0 means scan to the end of the file).
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
long nfs_scan_dirty(struct address_space *mapping,
                struct writeback_control *wbc,
                struct list_head *dst)
{
        struct nfs_inode *nfsi = NFS_I(mapping->host);
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        pgoff_t idx_start, idx_end;
        long res = 0;
        int found, i;

        if (nfsi->ndirty == 0)
                return 0;
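        /* Translate the byte range in the writeback_control into
         * page-cache indices for the radix tree walk below. */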
        if (wbc->range_cyclic) {
                idx_start = 0;
                idx_end = ULONG_MAX;
        } else if (wbc->range_end == 0) {
                idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
                idx_end = ULONG_MAX;
        } else {
                idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
                idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
        }

        for (;;) {
                unsigned int toscan = NFS_SCAN_MAXENTRIES;

                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start, toscan,
                                NFS_PAGE_TAG_DIRTY);

                /* Did we make progress? */
                if (found <= 0)
                        break;

                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (!wbc->range_cyclic && req->wb_index > idx_end)
                                goto out;

                        /* Try to lock request and mark it for writeback */
                        if (!nfs_set_page_writeback_locked(req))
                                goto next;
                        radix_tree_tag_clear(&nfsi->nfs_page_tree,
                                        req->wb_index, NFS_PAGE_TAG_DIRTY);
                        nfsi->ndirty--;
                        nfs_list_remove_request(req);
                        nfs_list_add_request(req, dst);
                        res++;
                        if (res == LONG_MAX)
                                goto out;
next:
                        idx_start = req->wb_index + 1;
                }
        }
out:
        WARN_ON((nfsi->ndirty == 0) != list_empty(&nfsi->dirty));
        return res;
}

/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists onto @dst.
 * If @npages is 0, the entire address_space starting at index
 * @idx_start is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
                struct list_head *dst, unsigned long idx_start,
                unsigned int npages)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        unsigned long idx_end;
        int found, i;
        int res;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start,
                                NFS_SCAN_MAXENTRIES);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;
                        idx_start = req->wb_index + 1;
                        if (req->wb_list_head != head)
                                continue;
                        if (nfs_set_page_writeback_locked(req)) {
                                nfs_list_remove_request(req);
                                nfs_list_add_request(req, dst);
                                res++;
                        }
                }
        }
out:
        return res;
}

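/*
 * Example (illustrative; assumes the inode's commit list, as used by
 * callers elsewhere in the NFS client):
 *
 *      moved = nfs_scan_list(nfsi, &nfsi->commit, &dst, 0, 0);
 *
 * would move every lockable request on the commit list onto 'dst'.
 */
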
int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL, NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}