NFSv4.1: Send lseg down into nfs_write_rpcsetup
fs/nfs/pnfs.c

/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}
82
83/*
84 * Try to set the server's pnfs module to the pnfs layout type specified by id.
85 * Currently only one pNFS layout driver per filesystem is supported.
86 *
87 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
88 */
89void
90set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
91{
92 struct pnfs_layoutdriver_type *ld_type = NULL;
93
94 if (id == 0)
95 goto out_no_driver;
96 if (!(server->nfs_client->cl_exchange_flags &
97 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
98 printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
99 id, server->nfs_client->cl_exchange_flags);
100 goto out_no_driver;
101 }
102 ld_type = find_pnfs_driver(id);
103 if (!ld_type) {
104 request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
105 ld_type = find_pnfs_driver(id);
106 if (!ld_type) {
107 dprintk("%s: No pNFS module found for %u.\n",
108 __func__, id);
109 goto out_no_driver;
110 }
111 }
02c35fca
FI
112 if (!try_module_get(ld_type->owner)) {
113 dprintk("%s: Could not grab reference on module\n", __func__);
114 goto out_no_driver;
115 }
85e174ba 116 server->pnfs_curr_ld = ld_type;
ea8eecdd 117
85e174ba
RL
118 dprintk("%s: pNFS module for %u set\n", __func__, id);
119 return;
120
121out_no_driver:
122 dprintk("%s: Using NFSv4 I/O\n", __func__);
123 server->pnfs_curr_ld = NULL;
124}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
		       __func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
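
/*
 * Usage sketch (added commentary, not part of the original file): a layout
 * driver module would typically register itself on load and unregister on
 * unload. The "example" names below are hypothetical; only the two
 * pnfs_*_layoutdriver calls and the mandatory alloc_lseg/free_lseg hooks
 * come from the code above.
 *
 *	static struct pnfs_layoutdriver_type example_ld = {
 *		.id         = LAYOUT_NFSV4_1_FILES,
 *		.name       = "example",
 *		.owner      = THIS_MODULE,
 *		.alloc_lseg = example_alloc_lseg,
 *		.free_lseg  = example_free_lseg,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&example_ld);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&example_ld);
 *	}
 */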

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	kfree(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}

static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
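
/*
 * Note (added commentary): put_lseg() pairs with its get_lseg() counterpart
 * declared in pnfs.h. A hypothetical caller pattern, for illustration only:
 *
 *	struct pnfs_layout_segment *lseg = get_lseg(cached_lseg);
 *	... issue I/O against lseg ...
 *	put_lseg(lseg);	// last reference frees via pnfs_free_lseg_list()
 */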

static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
	return (recall_iomode == IOMODE_ANY ||
		lseg_iomode == recall_iomode);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    u32 iomode)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
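
/*
 * Worked example (added commentary): the signed casts above implement
 * serial-number arithmetic. With oldseq = 0xffffffff and newseq = 0,
 * (int)(newseq - oldseq) == 1 > 0, so a stateid whose seqid has wrapped
 * past 2**32 - 1 still counts as more recent. The 3 << 29 threshold keeps
 * plh_barrier within 2**30 behind newseq, so it can never drift far enough
 * to be mistaken for being "ahead".
 */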
418
cf7d63f1
FI
419/* lget is set to 1 if called from inside send_layoutget call chain */
420static bool
43f1b3da
FI
421pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
422 int lget)
423{
424 if ((stateid) &&
425 (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
426 return true;
f7e8917a 427 return lo->plh_block_lgets ||
38511722 428 test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
f7e8917a 429 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
43f1b3da 430 (list_empty(&lo->plh_segs) &&
cf7d63f1
FI
431 (atomic_read(&lo->plh_outstanding) > lget));
432}
433
fd6002e9
FI
434int
435pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
436 struct nfs4_state *open_state)
b1f69b75 437{
fd6002e9 438 int status = 0;
974cec8c 439
b1f69b75 440 dprintk("--> %s\n", __func__);
fd6002e9 441 spin_lock(&lo->plh_inode->i_lock);
43f1b3da 442 if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
cf7d63f1
FI
443 status = -EAGAIN;
444 } else if (list_empty(&lo->plh_segs)) {
fd6002e9
FI
445 int seq;
446
447 do {
448 seq = read_seqbegin(&open_state->seqlock);
449 memcpy(dst->data, open_state->stateid.data,
450 sizeof(open_state->stateid.data));
451 } while (read_seqretry(&open_state->seqlock, seq));
452 } else
453 memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
454 spin_unlock(&lo->plh_inode->i_lock);
b1f69b75 455 dprintk("<-- %s\n", __func__);
fd6002e9 456 return status;
b1f69b75
AA
457}
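
/*
 * Note (added commentary): per RFC 5661, the first LAYOUTGET for a file
 * must carry an open, delegation, or lock stateid; only subsequent calls
 * may use the layout stateid. That is why an empty plh_segs list above
 * copies open_state's stateid under its seqlock, while a populated cache
 * reuses lo->plh_stateid.
 */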

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset:   0
 *    arg->length:   all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	       struct nfs_open_context *ctx,
	       u32 iomode)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
	if (lgp == NULL)
		return NULL;
	lgp->args.minlength = NFS4_MAX_UINT64;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range.iomode = iomode;
	lgp->args.range.offset = 0;
	lgp->args.range.length = NFS4_MAX_UINT64;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->lsegpp = &lseg;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
	}
	return lseg;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(u32 iomode1, u32 iomode2)
{
	/* read > read/write */
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}
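
/*
 * Example (added commentary): cmp_layout(IOMODE_RW, IOMODE_READ) evaluates
 * to 1 - 0 = 1, so in pnfs_insert_layout() below a new READ lseg skips past
 * existing RW lsegs and lands after them. RW segments therefore stay at the
 * head of plh_segs and are found first by pnfs_find_lseg().
 */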

static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;
	int found = 0;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		found = 1;
		break;
	}
	if (!found) {
		list_add_tail(&lseg->pls_list, &lo->plh_segs);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu at tail\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length);
	}
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		kfree(new);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(lseg, iomode)) {
			ret = get_lseg(lseg);
			break;
		}
		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   enum pnfs_iomode iomode)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, iomode);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}

	lseg = send_layoutget(lo, ctx, iomode);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
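
/*
 * Illustrative caller pattern (added commentary, hypothetical): I/O paths
 * take a reference on an lseg via pnfs_update_layout() and drop it with
 * put_lseg() when done, falling back to the MDS when no layout is available:
 *
 *	lseg = pnfs_update_layout(inode, ctx, IOMODE_READ);
 *	if (lseg) {
 *		... issue pNFS I/O through the layout driver ...
 *		put_lseg(lseg);
 *	} else {
 *		... send the I/O through the metadata server instead ...
 *	}
 */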

int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Verify we got what we asked for.
	 * Note that because the xdr parsing only accepts a single
	 * element array, this can fail even if the server is behaving
	 * correctly.
	 */
	if (lgp->args.range.iomode > res->range.iomode ||
	    res->range.offset != 0 ||
	    res->range.length != NFS4_MAX_UINT64) {
		status = -EINVAL;
		goto out;
	}
	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
			     struct nfs_page *prev,
			     struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is the first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_READ);
	}
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		      const struct rpc_call_ops *call_ops)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else {
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	}
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}