fs/nfs/pnfs.c (linux-block.git)
/*
 * pNFS functions to call and manage layout drivers.
 *
 * Copyright (c) 2002 [year of first publication]
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
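
/*
 * Editorial note (not part of this file): request_module() above builds
 * a module alias from LAYOUT_NFSV4_1_MODULE_PREFIX plus the layout type
 * id, so a layout driver that wants to be loaded on demand declares a
 * matching alias. For a driver handling layout type 1 (files layout)
 * that would look like:
 *
 *	MODULE_ALIAS("nfs-layouttype4-1");
 *
 * With the alias in place, mounting a pNFS-capable export loads the
 * module here and the second find_pnfs_driver() call then finds it in
 * pnfs_modules_tbl.
 */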

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
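
/*
 * Illustrative sketch (not part of this file): the minimal shape of a
 * layout driver module built on the two exports above. The callback
 * names are hypothetical; only .id, .name, .owner, .alloc_lseg and
 * .free_lseg reflect the checks enforced by pnfs_register_layoutdriver().
 */
#if 0
static struct pnfs_layoutdriver_type examplelayout_type = {
	.id		= LAYOUT_NFSV4_1_FILES,		/* must be non-zero */
	.name		= "LAYOUT_EXAMPLE",
	.owner		= THIS_MODULE,
	.alloc_lseg	= examplelayout_alloc_lseg,	/* required */
	.free_lseg	= examplelayout_free_lseg,	/* required */
};

static int __init examplelayout_init(void)
{
	return pnfs_register_layoutdriver(&examplelayout_type);
}

static void __exit examplelayout_exit(void)
{
	pnfs_unregister_layoutdriver(&examplelayout_type);
}

module_init(examplelayout_init);
module_exit(examplelayout_exit);
#endif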

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	kfree(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}
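
/*
 * A note on the refcount pattern above (editorial, not part of this
 * file): atomic_dec_and_lock() only takes i_lock when the count is
 * about to hit zero, which keeps the common put cheap while still
 * serializing destruction against lookups done under i_lock. An
 * open-coded equivalent would have to take the lock unconditionally:
 *
 *	spin_lock(&inode->i_lock);
 *	if (atomic_dec_and_test(&lo->plh_refcount))
 *		destroy_layout_hdr(lo);
 *	spin_unlock(&inode->i_lock);
 */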

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}

static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);

static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
	return (recall_iomode == IOMODE_ANY ||
		lseg_iomode == recall_iomode);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns the number of matching invalid lsegs remaining in the list
 * after the call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    u32 iomode)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids. It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
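
/*
 * Worked example of the wraparound math above (editorial, not part of
 * this file): seqids are compared as signed 32-bit differences, so a
 * barrier counts as "behind" newseq only while (newseq - barrier) is
 * below 2**31. With newseq = 0x00000010 and plh_barrier = 0xfffffff0,
 * the unsigned difference is 0x20: the barrier wrapped but is still
 * close behind. If the difference ever exceeds 3 * 2**29 (0x60000000),
 * the else-branch pulls the barrier up to newseq - 2**30, keeping it
 * comfortably inside the signed-comparison window.
 */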

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
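
/*
 * Illustrative sketch (not part of this file): the read_seqbegin/
 * read_seqretry loop above is the standard lockless-reader side of a
 * seqlock; the reader retries if a writer updated the stateid while it
 * was copying. The writer side, which lives in the NFSv4 state code,
 * pairs with it roughly like this (hypothetical helper, shown for shape
 * only):
 */
#if 0
static void example_update_open_stateid(struct nfs4_state *state,
					const nfs4_stateid *stateid)
{
	write_seqlock(&state->seqlock);
	memcpy(state->stateid.data, stateid->data,
	       sizeof(state->stateid.data));
	write_sequnlock(&state->seqlock);
}
#endif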

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   u32 iomode)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
	if (lgp == NULL)
		return NULL;
	lgp->args.minlength = NFS4_MAX_UINT64;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range.iomode = iomode;
	lgp->args.range.offset = 0;
	lgp->args.range.length = NFS4_MAX_UINT64;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->lsegpp = &lseg;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
	}
	return lseg;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}
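
/*
 * Worked example for the worst-case barrier above (editorial, not part
 * of this file): each outstanding LAYOUTGET may bump the seqid by one,
 * so with current_seqid = 5 and plh_outstanding = 2, the highest seqid
 * a reply still in flight could carry is 7, and that is what gets
 * reported as the barrier.
 */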

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(u32 iomode1, u32 iomode2)
{
	/* read > read/write */
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}
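
/*
 * Worked example (editorial, not part of this file):
 *   cmp_layout(IOMODE_RW,   IOMODE_READ) = 1 - 0 =  1
 *   cmp_layout(IOMODE_READ, IOMODE_RW)   = 0 - 1 = -1
 * pnfs_insert_layout() below passes the existing entry as iomode1 and
 * the new lseg as iomode2, and skips entries while the result is > 0.
 * A new READ lseg therefore walks past all RW entries before being
 * linked in, so RW segments stay at the head of plh_segs.
 */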

static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;
	int found = 0;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		found = 1;
		break;
	}
	if (!found) {
		list_add_tail(&lseg->pls_list, &lo->plh_segs);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu at tail\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length);
	}
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		kfree(new);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
}
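
/*
 * Worked example (editorial, not part of this file): a READ request can
 * be satisfied by either a READ or an RW lseg, so for an RW lseg
 * is_matching_lseg(lseg, IOMODE_READ) returns true; an RW request must
 * not use a READ-only lseg, so for a READ lseg
 * is_matching_lseg(lseg, IOMODE_RW) returns false, matching the table
 * above.
 */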

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(lseg, iomode)) {
			ret = get_lseg(lseg);
			break;
		}
		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   enum pnfs_iomode iomode)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, iomode);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}

	lseg = send_layoutget(lo, ctx, iomode);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
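
/*
 * Illustrative sketch (not part of this file): the pattern callers of
 * pnfs_update_layout() follow. The I/O paths (see the pg_test helpers
 * below) take a reference here and drop it with put_lseg() when done;
 * a NULL return simply means "fall back to regular NFSv4 I/O through
 * the MDS".
 */
#if 0
static void example_do_read(struct inode *inode, struct nfs_open_context *ctx)
{
	struct pnfs_layout_segment *lseg;

	lseg = pnfs_update_layout(inode, ctx, IOMODE_READ);
	if (lseg) {
		/* ... issue reads through the layout driver ... */
		put_lseg(lseg);	/* drop the reference taken above */
	} else {
		/* ... send the reads to the MDS instead ... */
	}
}
#endif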

int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Verify we got what we asked for.
	 * Note that because the xdr parsing only accepts a single
	 * element array, this can fail even if the server is behaving
	 * correctly.
	 */
	if (lgp->args.range.iomode > res->range.iomode ||
	    res->range.offset != 0 ||
	    res->range.length != NFS4_MAX_UINT64) {
		status = -EINVAL;
		goto out;
	}
	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
			     struct nfs_page *prev,
			     struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is the first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_READ);
	}
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
}

static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
			      struct nfs_page *prev,
			      struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is the first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_RW);
	}
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
}
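
/*
 * Illustrative sketch (not part of this file): the shape of a layout
 * driver's pg_test callback, which the wrappers above delegate to once
 * an lseg has been acquired. The stripe check is hypothetical; a real
 * driver such as the files layout decides whether prev and req can be
 * coalesced into one RPC to the same data server.
 */
#if 0
static int examplelayout_pg_test(struct nfs_pageio_descriptor *pgio,
				 struct nfs_page *prev,
				 struct nfs_page *req)
{
	if (!pgio->pg_lseg)
		return 1;	/* no layout: let generic code coalesce */
	/* Only coalesce requests the driver can send as a single RPC,
	 * e.g. requests falling in the same stripe unit.
	 */
	return example_same_stripe(pgio->pg_lseg, prev, req);
}
#endif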

enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops, int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else {
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	}
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
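
/*
 * Illustrative sketch (not part of this file): the contract a layout
 * driver's read_pagelist/write_pagelist honors. Returning
 * PNFS_NOT_ATTEMPTED tells the wrappers above to drop the lseg so the
 * caller retries through the MDS; PNFS_ATTEMPTED means the driver has
 * taken ownership of the I/O and will complete it through the saved
 * mds_ops callbacks. The availability check is hypothetical.
 */
#if 0
static enum pnfs_try_status
examplelayout_read_pagelist(struct nfs_read_data *rdata)
{
	if (!example_ds_available(rdata->lseg))	/* hypothetical check */
		return PNFS_NOT_ATTEMPTED;	/* fall back to the MDS */
	/* ... queue the read to the data server ... */
	return PNFS_ATTEMPTED;
}
#endif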