2 * Module for pnfs flexfile layout driver.
4 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6 * Tao Peng <bergwolf@primarydata.com>
9 #include <linux/nfs_fs.h>
10 #include <linux/nfs_page.h>
11 #include <linux/module.h>
13 #include <linux/sunrpc/metrics.h>
14 #include <linux/nfs_idmap.h>
16 #include "flexfilelayout.h"
17 #include "../nfs4session.h"
18 #include "../internal.h"
19 #include "../delegation.h"
20 #include "../nfs4trace.h"
21 #include "../iostat.h"
24 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
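/* Fixed back-off handed to rpc_delay() by the DS error handler below. */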
26 #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
28 static struct pnfs_layout_hdr *
29 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
31 struct nfs4_flexfile_layout *ffl;
33 ffl = kzalloc(sizeof(*ffl), gfp_flags);
35 INIT_LIST_HEAD(&ffl->error_list);
36 return &ffl->generic_hdr;
42 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
44 struct nfs4_ff_layout_ds_err *err, *n;
46 list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
51 kfree(FF_LAYOUT_FROM_HDR(lo));
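/*
 * XDR helpers for decoding the flexfile layout body: a fixed-size
 * stateid and deviceid, a variable-length filehandle, and the
 * stringified uid/gid handled by decode_name() below.
 */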
54 static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
58 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
59 if (unlikely(p == NULL))
61 memcpy(stateid, p, NFS4_STATEID_SIZE);
62 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
63 p[0], p[1], p[2], p[3]);
67 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
71 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
74 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
75 nfs4_print_deviceid(devid);
79 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
83 p = xdr_inline_decode(xdr, 4);
86 fh->size = be32_to_cpup(p++);
87 if (fh->size > sizeof(struct nfs_fh)) {
88 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
93 p = xdr_inline_decode(xdr, fh->size);
96 memcpy(&fh->data, p, fh->size);
97 dprintk("%s: fh len %d\n", __func__, fh->size);
103 * Currently only stringified uids and gids are accepted.
104 * I.e., Kerberos is not supported to the DSes, so no principals.
106 * That means that one common function will suffice, but when
107 * principals are added, this should be split to accommodate
108 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
111 decode_name(struct xdr_stream *xdr, u32 *id)
116 /* opaque_length(4)*/
117 p = xdr_inline_decode(xdr, 4);
120 len = be32_to_cpup(p++);
124 dprintk("%s: len %u\n", __func__, len);
127 p = xdr_inline_decode(xdr, len);
131 if (!nfs_map_string_to_numeric((char *)p, len, id))
137 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
141 if (fls->mirror_array) {
142 for (i = 0; i < fls->mirror_array_cnt; i++) {
143 /* mirror_ds is normally freed in
144 * .free_deviceid_node, but we still do it here
145 * for the .alloc_lseg error path */
146 if (fls->mirror_array[i]) {
147 kfree(fls->mirror_array[i]->fh_versions);
148 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
149 kfree(fls->mirror_array[i]);
152 kfree(fls->mirror_array);
153 fls->mirror_array = NULL;
157 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
161 dprintk("--> %s\n", __func__);
163 /* FIXME: remove this check when layout segment support is added */
164 if (lgr->range.offset != 0 ||
165 lgr->range.length != NFS4_MAX_UINT64) {
166 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
171 dprintk("--> %s returns %d\n", __func__, ret);
175 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
178 ff_layout_free_mirror_array(fls);
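/*
 * Simple O(n^2) swap sort that orders the mirrors by descending
 * efficiency, so ff_layout_choose_best_ds_for_read() tries the most
 * efficient DS first.
 */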
183 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
185 struct nfs4_ff_layout_mirror *tmp;
188 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
189 for (j = i + 1; j < fls->mirror_array_cnt; j++)
190 if (fls->mirror_array[i]->efficiency <
191 fls->mirror_array[j]->efficiency) {
192 tmp = fls->mirror_array[i];
193 fls->mirror_array[i] = fls->mirror_array[j];
194 fls->mirror_array[j] = tmp;
199 static struct pnfs_layout_segment *
200 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
201 struct nfs4_layoutget_res *lgr,
204 struct pnfs_layout_segment *ret;
205 struct nfs4_ff_layout_segment *fls = NULL;
206 struct xdr_stream stream;
208 struct page *scratch;
210 u32 mirror_array_cnt;
214 dprintk("--> %s\n", __func__);
215 scratch = alloc_page(gfp_flags);
217 return ERR_PTR(-ENOMEM);
219 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
221 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
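/*
 * The layout body decoded below is: stripe unit, mirror count, then
 * one entry per mirror holding ds_count, deviceid, efficiency,
 * stateid, an array of filehandles, and stringified uid/gid.
 */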
223 /* stripe unit and mirror_array_cnt */
225 p = xdr_inline_decode(&stream, 8 + 4);
229 p = xdr_decode_hyper(p, &stripe_unit);
230 mirror_array_cnt = be32_to_cpup(p++);
231 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
232 stripe_unit, mirror_array_cnt);
234 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
235 mirror_array_cnt == 0)
239 fls = kzalloc(sizeof(*fls), gfp_flags);
243 fls->mirror_array_cnt = mirror_array_cnt;
244 fls->stripe_unit = stripe_unit;
245 fls->mirror_array = kcalloc(fls->mirror_array_cnt,
246 sizeof(fls->mirror_array[0]), gfp_flags);
247 if (fls->mirror_array == NULL)
250 for (i = 0; i < fls->mirror_array_cnt; i++) {
251 struct nfs4_deviceid devid;
252 struct nfs4_deviceid_node *idnode;
258 p = xdr_inline_decode(&stream, 4);
261 ds_count = be32_to_cpup(p);
263 /* FIXME: allow for striping? */
267 fls->mirror_array[i] =
268 kzalloc(sizeof(struct nfs4_ff_layout_mirror),
270 if (fls->mirror_array[i] == NULL) {
275 spin_lock_init(&fls->mirror_array[i]->lock);
276 fls->mirror_array[i]->ds_count = ds_count;
279 rc = decode_deviceid(&stream, &devid);
283 idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
284 &devid, lh->plh_lc_cred,
287 * Upon success, mirror_ds was allocated either by a previous
288 * getdeviceinfo or newly by .alloc_deviceid_node; an
289 * nfs4_find_get_deviceid() failure is therefore a getdeviceinfo failure.
292 fls->mirror_array[i]->mirror_ds =
293 FF_LAYOUT_MIRROR_DS(idnode);
299 p = xdr_inline_decode(&stream, 4);
302 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
305 rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
310 p = xdr_inline_decode(&stream, 4);
313 fh_count = be32_to_cpup(p);
315 fls->mirror_array[i]->fh_versions =
316 kcalloc(fh_count, sizeof(struct nfs_fh),
318 if (fls->mirror_array[i]->fh_versions == NULL) {
323 for (j = 0; j < fh_count; j++) {
324 rc = decode_nfs_fh(&stream,
325 &fls->mirror_array[i]->fh_versions[j]);
330 fls->mirror_array[i]->fh_versions_cnt = fh_count;
333 rc = decode_name(&stream, &fls->mirror_array[i]->uid);
338 rc = decode_name(&stream, &fls->mirror_array[i]->gid);
342 dprintk("%s: uid %d gid %d\n", __func__,
343 fls->mirror_array[i]->uid,
344 fls->mirror_array[i]->gid);
347 ff_layout_sort_mirrors(fls);
348 rc = ff_layout_check_layout(lgr);
352 ret = &fls->generic_hdr;
353 dprintk("<-- %s (success)\n", __func__);
355 __free_page(scratch);
358 _ff_layout_free_lseg(fls);
360 dprintk("<-- %s (%d)\n", __func__, rc);
364 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
366 struct pnfs_layout_segment *lseg;
368 list_for_each_entry(lseg, &layout->plh_segs, pls_list)
369 if (lseg->pls_range.iomode == IOMODE_RW)
376 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
378 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
381 dprintk("--> %s\n", __func__);
383 for (i = 0; i < fls->mirror_array_cnt; i++) {
384 if (fls->mirror_array[i]) {
385 nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds);
386 fls->mirror_array[i]->mirror_ds = NULL;
387 if (fls->mirror_array[i]->cred) {
388 put_rpccred(fls->mirror_array[i]->cred);
389 fls->mirror_array[i]->cred = NULL;
394 if (lseg->pls_range.iomode == IOMODE_RW) {
395 struct nfs4_flexfile_layout *ffl;
398 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
399 inode = ffl->generic_hdr.plh_inode;
400 spin_lock(&inode->i_lock);
401 if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
402 ffl->commit_info.nbuckets = 0;
403 kfree(ffl->commit_info.buckets);
404 ffl->commit_info.buckets = NULL;
406 spin_unlock(&inode->i_lock);
408 _ff_layout_free_lseg(fls);
411 /* Return 1 until we support multiple lsegs */
413 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
419 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
420 struct nfs_commit_info *cinfo,
423 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
424 struct pnfs_commit_bucket *buckets;
427 if (cinfo->ds->nbuckets != 0) {
428 /* This assumes there is only one RW lseg per file.
429 * To support multiple lsegs per file, we need to
430 * change struct pnfs_commit_bucket to allow dynamically
431 * increasing nbuckets.
436 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
438 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
445 spin_lock(cinfo->lock);
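/* Re-check under cinfo->lock: the buckets may already have been set
 * up by someone else since the unlocked check above. */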
446 if (cinfo->ds->nbuckets != 0)
449 cinfo->ds->buckets = buckets;
450 cinfo->ds->nbuckets = size;
451 for (i = 0; i < size; i++) {
452 INIT_LIST_HEAD(&buckets[i].written);
453 INIT_LIST_HEAD(&buckets[i].committing);
454 /* mark direct verifier as unset */
455 buckets[i].direct_verf.committed =
456 NFS_INVALID_STABLE_HOW;
459 spin_unlock(cinfo->lock);
464 static struct nfs4_pnfs_ds *
465 ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio,
468 struct nfs4_ff_layout_segment *fls;
469 struct nfs4_pnfs_ds *ds;
472 fls = FF_LAYOUT_LSEG(pgio->pg_lseg);
473 /* mirrors are sorted by efficiency */
474 for (idx = 0; idx < fls->mirror_array_cnt; idx++) {
475 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false);
486 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
487 struct nfs_page *req)
489 struct nfs_pgio_mirror *pgm;
490 struct nfs4_ff_layout_mirror *mirror;
491 struct nfs4_pnfs_ds *ds;
494 /* Use full layout for now */
496 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
502 /* If no lseg, fall back to read through mds */
503 if (pgio->pg_lseg == NULL)
506 ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx);
509 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
511 pgio->pg_mirror_idx = ds_idx;
513 /* read always uses only one mirror - idx 0 for pgio layer */
514 pgm = &pgio->pg_mirrors[0];
515 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
519 pnfs_put_lseg(pgio->pg_lseg);
520 pgio->pg_lseg = NULL;
521 nfs_pageio_reset_read_mds(pgio);
525 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
526 struct nfs_page *req)
528 struct nfs4_ff_layout_mirror *mirror;
529 struct nfs_pgio_mirror *pgm;
530 struct nfs_commit_info cinfo;
531 struct nfs4_pnfs_ds *ds;
536 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
542 /* If no lseg, fall back to write through mds */
543 if (pgio->pg_lseg == NULL)
546 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
547 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
551 /* Use a direct mapping of ds_idx to pgio mirror_idx */
552 if (WARN_ON_ONCE(pgio->pg_mirror_count !=
553 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
556 for (i = 0; i < pgio->pg_mirror_count; i++) {
557 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
560 pgm = &pgio->pg_mirrors[i];
561 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
562 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
568 pnfs_put_lseg(pgio->pg_lseg);
569 pgio->pg_lseg = NULL;
570 nfs_pageio_reset_write_mds(pgio);
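/*
 * Report to the generic pgio layer how many mirrors this write must
 * go to; without an lseg there is no mirroring and we fall back to
 * writing through the MDS.
 */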
574 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
575 struct nfs_page *req)
578 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
585 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
587 /* no lseg means that pnfs is not in use, so no mirroring here */
588 pnfs_put_lseg(pgio->pg_lseg);
589 pgio->pg_lseg = NULL;
590 nfs_pageio_reset_write_mds(pgio);
594 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
595 .pg_init = ff_layout_pg_init_read,
596 .pg_test = pnfs_generic_pg_test,
597 .pg_doio = pnfs_generic_pg_readpages,
598 .pg_cleanup = pnfs_generic_pg_cleanup,
601 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
602 .pg_init = ff_layout_pg_init_write,
603 .pg_test = pnfs_generic_pg_test,
604 .pg_doio = pnfs_generic_pg_writepages,
605 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
606 .pg_cleanup = pnfs_generic_pg_cleanup,
609 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
611 struct rpc_task *task = &hdr->task;
613 pnfs_layoutcommit_inode(hdr->inode, false);
616 dprintk("%s Reset task %5u for i/o through pNFS "
617 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
619 hdr->inode->i_sb->s_id,
620 (unsigned long long)NFS_FILEID(hdr->inode),
622 (unsigned long long)hdr->args.offset);
625 struct nfs_open_context *ctx;
627 ctx = nfs_list_entry(hdr->pages.next)->wb_context;
628 set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
629 hdr->completion_ops->error_cleanup(&hdr->pages);
631 nfs_direct_set_resched_writes(hdr->dreq);
632 /* fake an unstable write to let the common NFS code resend the pages */
633 hdr->verf.committed = NFS_UNSTABLE;
639 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
640 dprintk("%s Reset task %5u for i/o through MDS "
641 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
643 hdr->inode->i_sb->s_id,
644 (unsigned long long)NFS_FILEID(hdr->inode),
646 (unsigned long long)hdr->args.offset);
648 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
652 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
654 struct rpc_task *task = &hdr->task;
656 pnfs_layoutcommit_inode(hdr->inode, false);
658 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
659 dprintk("%s Reset task %5u for i/o through MDS "
660 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
662 hdr->inode->i_sb->s_id,
663 (unsigned long long)NFS_FILEID(hdr->inode),
665 (unsigned long long)hdr->args.offset);
667 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
671 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
672 struct nfs4_state *state,
673 struct nfs_client *clp,
674 struct pnfs_layout_segment *lseg,
677 struct pnfs_layout_hdr *lo = lseg->pls_layout;
678 struct inode *inode = lo->plh_inode;
679 struct nfs_server *mds_server = NFS_SERVER(inode);
681 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
682 struct nfs_client *mds_client = mds_server->nfs_client;
683 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
685 if (task->tk_status >= 0)
688 switch (task->tk_status) {
689 /* MDS state errors */
690 case -NFS4ERR_DELEG_REVOKED:
691 case -NFS4ERR_ADMIN_REVOKED:
692 case -NFS4ERR_BAD_STATEID:
695 nfs_remove_bad_delegation(state->inode);
696 case -NFS4ERR_OPENMODE:
699 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
700 goto out_bad_stateid;
701 goto wait_on_recovery;
702 case -NFS4ERR_EXPIRED:
704 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
705 goto out_bad_stateid;
707 nfs4_schedule_lease_recovery(mds_client);
708 goto wait_on_recovery;
709 /* DS session errors */
710 case -NFS4ERR_BADSESSION:
711 case -NFS4ERR_BADSLOT:
712 case -NFS4ERR_BAD_HIGH_SLOT:
713 case -NFS4ERR_DEADSESSION:
714 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
715 case -NFS4ERR_SEQ_FALSE_RETRY:
716 case -NFS4ERR_SEQ_MISORDERED:
717 dprintk("%s ERROR %d, Reset session. Exchangeid "
718 "flags 0x%x\n", __func__, task->tk_status,
719 clp->cl_exchange_flags);
720 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
724 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
726 case -NFS4ERR_RETRY_UNCACHED_REP:
728 /* Invalidate Layout errors */
729 case -NFS4ERR_PNFS_NO_LAYOUT:
730 case -ESTALE: /* mapped NFS4ERR_STALE */
731 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
732 case -EISDIR: /* mapped NFS4ERR_ISDIR */
733 case -NFS4ERR_FHEXPIRED:
734 case -NFS4ERR_WRONG_TYPE:
735 dprintk("%s Invalid layout error %d\n", __func__,
738 * Destroy layout so new i/o will get a new layout.
739 * Layout will not be destroyed until all current lseg
740 * references are put. Mark layout as invalid to resend failed
741 * i/o and all i/o waiting on the slot table to the MDS until
742 * layout is destroyed and a new valid layout is obtained.
744 pnfs_destroy_layout(NFS_I(inode));
745 rpc_wake_up(&tbl->slot_tbl_waitq);
747 /* RPC connection errors */
755 dprintk("%s DS connection error %d\n", __func__,
757 nfs4_mark_deviceid_unavailable(devid);
758 rpc_wake_up(&tbl->slot_tbl_waitq);
761 if (ff_layout_has_available_ds(lseg))
762 return -NFS4ERR_RESET_TO_PNFS;
764 dprintk("%s Retry through MDS. Error %d\n", __func__,
766 return -NFS4ERR_RESET_TO_MDS;
772 task->tk_status = -EIO;
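/* Park the task on the MDS client's recovery waitqueue; if the state
 * manager is not running, wake it immediately rather than leave it
 * queued. */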
775 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
776 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
777 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
781 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
782 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
783 struct pnfs_layout_segment *lseg,
786 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
788 if (task->tk_status >= 0)
791 if (task->tk_status != -EJUKEBOX) {
792 dprintk("%s DS connection error %d\n", __func__,
794 nfs4_mark_deviceid_unavailable(devid);
795 if (ff_layout_has_available_ds(lseg))
796 return -NFS4ERR_RESET_TO_PNFS;
798 return -NFS4ERR_RESET_TO_MDS;
801 if (task->tk_status == -EJUKEBOX)
802 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
804 rpc_restart_call(task);
805 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
809 static int ff_layout_async_handle_error(struct rpc_task *task,
810 struct nfs4_state *state,
811 struct nfs_client *clp,
812 struct pnfs_layout_segment *lseg,
815 int vers = clp->cl_nfs_mod->rpc_vers->number;
819 return ff_layout_async_handle_error_v3(task, lseg, idx);
821 return ff_layout_async_handle_error_v4(task, state, clp,
824 /* should never happen */
830 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
831 int idx, u64 offset, u64 length,
832 u32 status, int opnum)
834 struct nfs4_ff_layout_mirror *mirror;
837 mirror = FF_LAYOUT_COMP(lseg, idx);
838 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
839 mirror, offset, length, status, opnum,
841 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
844 /* NFS_PROTO call done callback routines */
846 static int ff_layout_read_done_cb(struct rpc_task *task,
847 struct nfs_pgio_header *hdr)
852 trace_nfs4_pnfs_read(hdr, task->tk_status);
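/* An RPC timeout carries no op_status; map it to NFS4ERR_NXIO so the
 * failure is recorded against the DS below. */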
853 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
854 hdr->res.op_status = NFS4ERR_NXIO;
855 if (task->tk_status < 0 && hdr->res.op_status)
856 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
857 hdr->args.offset, hdr->args.count,
858 hdr->res.op_status, OP_READ);
859 err = ff_layout_async_handle_error(task, hdr->args.context->state,
860 hdr->ds_clp, hdr->lseg,
861 hdr->pgio_mirror_idx);
864 case -NFS4ERR_RESET_TO_PNFS:
865 set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
866 &hdr->lseg->pls_layout->plh_flags);
867 pnfs_read_resend_pnfs(hdr);
868 return task->tk_status;
869 case -NFS4ERR_RESET_TO_MDS:
870 inode = hdr->lseg->pls_layout->plh_inode;
871 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
872 ff_layout_reset_read(hdr);
873 return task->tk_status;
875 rpc_restart_call_prepare(task);
883 * We reference the rpc_cred of the first WRITE that triggers the need for
884 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
885 * rfc5661 is not clear about which credential should be used.
887 * The flexfile client should treat a DS-returned FILE_SYNC as DATA_SYNC, so,
888 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751,
889 * we always send LAYOUTCOMMIT after DS writes.
892 ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
894 pnfs_set_layoutcommit(hdr);
895 dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
896 (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
900 ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
902 /* No mirroring for now */
903 struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
905 return ff_layout_test_devid_unavailable(node);
908 static int ff_layout_read_prepare_common(struct rpc_task *task,
909 struct nfs_pgio_header *hdr)
911 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
912 rpc_exit(task, -EIO);
915 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
916 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
917 if (ff_layout_has_available_ds(hdr->lseg))
918 pnfs_read_resend_pnfs(hdr);
920 ff_layout_reset_read(hdr);
924 hdr->pgio_done_cb = ff_layout_read_done_cb;
930 * Call ops for the async read/write cases
931 * In the case of dense layouts, the offset needs to be reset to its original value.
934 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
936 struct nfs_pgio_header *hdr = data;
938 if (ff_layout_read_prepare_common(task, hdr))
941 rpc_call_start(task);
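/* A DS may speak NFSv4.1+ (sessions) or NFSv4.0 (per-client slot
 * table); pick the matching sequence setup. */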
944 static int ff_layout_setup_sequence(struct nfs_client *ds_clp,
945 struct nfs4_sequence_args *args,
946 struct nfs4_sequence_res *res,
947 struct rpc_task *task)
949 if (ds_clp->cl_session)
950 return nfs41_setup_sequence(ds_clp->cl_session,
954 return nfs40_setup_sequence(ds_clp->cl_slot_tbl,
960 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
962 struct nfs_pgio_header *hdr = data;
964 if (ff_layout_read_prepare_common(task, hdr))
967 if (ff_layout_setup_sequence(hdr->ds_clp,
973 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
974 hdr->args.lock_context, FMODE_READ) == -EIO)
975 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
978 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
980 struct nfs_pgio_header *hdr = data;
982 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
984 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
985 task->tk_status == 0) {
986 nfs4_sequence_done(task, &hdr->res.seq_res);
990 /* Note this may cause RPC to be resent */
991 hdr->mds_ops->rpc_call_done(task, hdr);
994 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
996 struct nfs_pgio_header *hdr = data;
998 rpc_count_iostats_metrics(task,
999 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1002 static int ff_layout_write_done_cb(struct rpc_task *task,
1003 struct nfs_pgio_header *hdr)
1005 struct inode *inode;
1008 trace_nfs4_pnfs_write(hdr, task->tk_status);
1009 if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status)
1010 hdr->res.op_status = NFS4ERR_NXIO;
1011 if (task->tk_status < 0 && hdr->res.op_status)
1012 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1013 hdr->args.offset, hdr->args.count,
1014 hdr->res.op_status, OP_WRITE);
1015 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1016 hdr->ds_clp, hdr->lseg,
1017 hdr->pgio_mirror_idx);
1020 case -NFS4ERR_RESET_TO_PNFS:
1021 case -NFS4ERR_RESET_TO_MDS:
1022 inode = hdr->lseg->pls_layout->plh_inode;
1023 pnfs_error_mark_layout_for_return(inode, hdr->lseg);
1024 if (err == -NFS4ERR_RESET_TO_PNFS) {
1025 pnfs_set_retry_layoutget(hdr->lseg->pls_layout);
1026 ff_layout_reset_write(hdr, true);
1028 pnfs_clear_retry_layoutget(hdr->lseg->pls_layout);
1029 ff_layout_reset_write(hdr, false);
1031 return task->tk_status;
1033 rpc_restart_call_prepare(task);
1037 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1038 hdr->res.verf->committed == NFS_DATA_SYNC)
1039 ff_layout_set_layoutcommit(hdr);
1044 static int ff_layout_commit_done_cb(struct rpc_task *task,
1045 struct nfs_commit_data *data)
1047 struct inode *inode;
1050 trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1051 if (task->tk_status == -ETIMEDOUT && !data->res.op_status)
1052 data->res.op_status = NFS4ERR_NXIO;
1053 if (task->tk_status < 0 && data->res.op_status)
1054 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1055 data->args.offset, data->args.count,
1056 data->res.op_status, OP_COMMIT);
1057 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1058 data->lseg, data->ds_commit_index);
1061 case -NFS4ERR_RESET_TO_PNFS:
1062 case -NFS4ERR_RESET_TO_MDS:
1063 inode = data->lseg->pls_layout->plh_inode;
1064 pnfs_error_mark_layout_for_return(inode, data->lseg);
1065 if (err == -NFS4ERR_RESET_TO_PNFS)
1066 pnfs_set_retry_layoutget(data->lseg->pls_layout);
1068 pnfs_clear_retry_layoutget(data->lseg->pls_layout);
1069 pnfs_generic_prepare_to_resend_writes(data);
1072 rpc_restart_call_prepare(task);
1076 if (data->verf.committed == NFS_UNSTABLE)
1077 pnfs_commit_set_layoutcommit(data);
1082 static int ff_layout_write_prepare_common(struct rpc_task *task,
1083 struct nfs_pgio_header *hdr)
1085 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1086 rpc_exit(task, -EIO);
1090 if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
1093 retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
1094 dprintk("%s task %u reset io to %s\n", __func__,
1095 task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
1096 ff_layout_reset_write(hdr, retry_pnfs);
1104 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1106 struct nfs_pgio_header *hdr = data;
1108 if (ff_layout_write_prepare_common(task, hdr))
1111 rpc_call_start(task);
1114 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1116 struct nfs_pgio_header *hdr = data;
1118 if (ff_layout_write_prepare_common(task, hdr))
1121 if (ff_layout_setup_sequence(hdr->ds_clp,
1122 &hdr->args.seq_args,
1127 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
1128 hdr->args.lock_context, FMODE_WRITE) == -EIO)
1129 rpc_exit(task, -EIO); /* lost lock, terminate I/O */
1132 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1134 struct nfs_pgio_header *hdr = data;
1136 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1137 task->tk_status == 0) {
1138 nfs4_sequence_done(task, &hdr->res.seq_res);
1142 /* Note this may cause RPC to be resent */
1143 hdr->mds_ops->rpc_call_done(task, hdr);
1146 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1148 struct nfs_pgio_header *hdr = data;
1150 rpc_count_iostats_metrics(task,
1151 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1154 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1156 rpc_call_start(task);
1159 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1161 struct nfs_commit_data *wdata = data;
1163 ff_layout_setup_sequence(wdata->ds_clp,
1164 &wdata->args.seq_args,
1165 &wdata->res.seq_res,
1169 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1171 struct nfs_commit_data *cdata = data;
1173 rpc_count_iostats_metrics(task,
1174 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1177 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1178 .rpc_call_prepare = ff_layout_read_prepare_v3,
1179 .rpc_call_done = ff_layout_read_call_done,
1180 .rpc_count_stats = ff_layout_read_count_stats,
1181 .rpc_release = pnfs_generic_rw_release,
1184 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1185 .rpc_call_prepare = ff_layout_read_prepare_v4,
1186 .rpc_call_done = ff_layout_read_call_done,
1187 .rpc_count_stats = ff_layout_read_count_stats,
1188 .rpc_release = pnfs_generic_rw_release,
1191 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1192 .rpc_call_prepare = ff_layout_write_prepare_v3,
1193 .rpc_call_done = ff_layout_write_call_done,
1194 .rpc_count_stats = ff_layout_write_count_stats,
1195 .rpc_release = pnfs_generic_rw_release,
1198 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1199 .rpc_call_prepare = ff_layout_write_prepare_v4,
1200 .rpc_call_done = ff_layout_write_call_done,
1201 .rpc_count_stats = ff_layout_write_count_stats,
1202 .rpc_release = pnfs_generic_rw_release,
1205 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1206 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1207 .rpc_call_done = pnfs_generic_write_commit_done,
1208 .rpc_count_stats = ff_layout_commit_count_stats,
1209 .rpc_release = pnfs_generic_commit_release,
1212 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1213 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1214 .rpc_call_done = pnfs_generic_write_commit_done,
1215 .rpc_count_stats = ff_layout_commit_count_stats,
1216 .rpc_release = pnfs_generic_commit_release,
1219 static enum pnfs_try_status
1220 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1222 struct pnfs_layout_segment *lseg = hdr->lseg;
1223 struct nfs4_pnfs_ds *ds;
1224 struct rpc_clnt *ds_clnt;
1225 struct rpc_cred *ds_cred;
1226 loff_t offset = hdr->args.offset;
1227 u32 idx = hdr->pgio_mirror_idx;
1231 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
1232 __func__, hdr->inode->i_ino,
1233 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1235 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1239 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1241 if (IS_ERR(ds_clnt))
1244 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1245 if (IS_ERR(ds_cred))
1248 vers = nfs4_ff_layout_ds_version(lseg, idx);
1250 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1251 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1253 atomic_inc(&ds->ds_clp->cl_count);
1254 hdr->ds_clp = ds->ds_clp;
1255 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1260 * Note that if we ever decide to split across DSes,
1261 * then we may need to handle dense-like offsets.
1263 hdr->args.offset = offset;
1264 hdr->mds_offset = offset;
1266 /* Perform an asynchronous read to ds */
1267 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1268 vers == 3 ? &ff_layout_read_call_ops_v3 :
1269 &ff_layout_read_call_ops_v4,
1270 0, RPC_TASK_SOFTCONN);
1272 return PNFS_ATTEMPTED;
1275 if (ff_layout_has_available_ds(lseg))
1276 return PNFS_TRY_AGAIN;
1277 return PNFS_NOT_ATTEMPTED;
1280 /* Perform async writes. */
1281 static enum pnfs_try_status
1282 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1284 struct pnfs_layout_segment *lseg = hdr->lseg;
1285 struct nfs4_pnfs_ds *ds;
1286 struct rpc_clnt *ds_clnt;
1287 struct rpc_cred *ds_cred;
1288 loff_t offset = hdr->args.offset;
1291 int idx = hdr->pgio_mirror_idx;
1293 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1295 return PNFS_NOT_ATTEMPTED;
1297 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1299 if (IS_ERR(ds_clnt))
1300 return PNFS_NOT_ATTEMPTED;
1302 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1303 if (IS_ERR(ds_cred))
1304 return PNFS_NOT_ATTEMPTED;
1306 vers = nfs4_ff_layout_ds_version(lseg, idx);
1308 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n",
1309 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1310 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
1313 hdr->pgio_done_cb = ff_layout_write_done_cb;
1314 atomic_inc(&ds->ds_clp->cl_count);
1315 hdr->ds_clp = ds->ds_clp;
1316 hdr->ds_commit_idx = idx;
1317 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1322 * Note that if we ever decide to split across DSes,
1323 * then we may need to handle dense-like offsets.
1325 hdr->args.offset = offset;
1327 /* Perform an asynchronous write */
1328 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1329 vers == 3 ? &ff_layout_write_call_ops_v3 :
1330 &ff_layout_write_call_ops_v4,
1331 sync, RPC_TASK_SOFTCONN);
1332 return PNFS_ATTEMPTED;
1336 ff_layout_mark_request_commit(struct nfs_page *req,
1337 struct pnfs_layout_segment *lseg,
1338 struct nfs_commit_info *cinfo,
1341 struct list_head *list;
1342 struct pnfs_commit_bucket *buckets;
1344 spin_lock(cinfo->lock);
1345 buckets = cinfo->ds->buckets;
1346 list = &buckets[ds_commit_idx].written;
1347 if (list_empty(list)) {
1348 /* Non-empty buckets hold a reference on the lseg. That ref
1349 * is normally transferred to the COMMIT call and released
1350 * there. It could also be released if the last req is pulled
1351 * off due to a rewrite, in which case it will be done in
1352 * pnfs_common_clear_request_commit
1354 WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
1355 buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
1357 set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1358 cinfo->ds->nwritten++;
1360 /* As in nfs_request_add_commit_list(): we need to add req to the list
1361 * without dropping the cinfo lock.
1363 set_bit(PG_CLEAN, &(req)->wb_flags);
1364 nfs_list_add_request(req, list);
1365 cinfo->mds->ncommit++;
1366 spin_unlock(cinfo->lock);
1368 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1369 inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
1371 __mark_inode_dirty(req->wb_context->dentry->d_inode,
1376 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1381 static struct nfs_fh *
1382 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1384 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1386 /* FIXME: Assume that there is only one NFS version available
1389 return &flseg->mirror_array[i]->fh_versions[0];
1392 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1394 struct pnfs_layout_segment *lseg = data->lseg;
1395 struct nfs4_pnfs_ds *ds;
1396 struct rpc_clnt *ds_clnt;
1397 struct rpc_cred *ds_cred;
1402 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1403 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1407 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1409 if (IS_ERR(ds_clnt))
1412 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1413 if (IS_ERR(ds_cred))
1416 vers = nfs4_ff_layout_ds_version(lseg, idx);
1418 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1419 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
1421 data->commit_done_cb = ff_layout_commit_done_cb;
1422 data->cred = ds_cred;
1423 atomic_inc(&ds->ds_clp->cl_count);
1424 data->ds_clp = ds->ds_clp;
1425 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1428 return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1429 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1430 &ff_layout_commit_call_ops_v4,
1431 how, RPC_TASK_SOFTCONN);
1433 pnfs_generic_prepare_to_resend_writes(data);
1434 pnfs_generic_commit_release(data);
1439 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1440 int how, struct nfs_commit_info *cinfo)
1442 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1443 ff_layout_initiate_commit);
1446 static struct pnfs_ds_commit_info *
1447 ff_layout_get_ds_info(struct inode *inode)
1449 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1454 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1458 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1460 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1464 static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
1465 struct xdr_stream *xdr,
1466 const struct nfs4_layoutreturn_args *args)
1468 struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
1470 int count = 0, ret = 0;
1472 start = xdr_reserve_space(xdr, 4);
1473 if (unlikely(!start))
1476 /* This assumes we always return _ALL_ layouts */
1477 spin_lock(&hdr->plh_inode->i_lock);
1478 ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
1479 spin_unlock(&hdr->plh_inode->i_lock);
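/* Backfill the array count reserved above now that the errors have
 * been encoded. */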
1481 *start = cpu_to_be32(count);
1486 /* report nothing for now */
1487 static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
1488 struct xdr_stream *xdr,
1489 const struct nfs4_layoutreturn_args *args)
1493 p = xdr_reserve_space(xdr, 4);
1495 *p = cpu_to_be32(0);
1498 static struct nfs4_deviceid_node *
1499 ff_layout_alloc_deviceid_node(struct nfs_server *server,
1500 struct pnfs_device *pdev, gfp_t gfp_flags)
1502 struct nfs4_ff_layout_ds *dsaddr;
1504 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
1507 return &dsaddr->id_node;
1511 ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
1512 struct xdr_stream *xdr,
1513 const struct nfs4_layoutreturn_args *args)
1515 struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
1518 dprintk("%s: Begin\n", __func__);
1519 start = xdr_reserve_space(xdr, 4);
1522 if (ff_layout_encode_ioerr(flo, xdr, args))
1525 ff_layout_encode_iostats(flo, xdr, args);
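/* Backfill the 4-byte opaque length reserved above: the number of
 * XDR words written after the length slot, times four. */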
1527 *start = cpu_to_be32((xdr->p - start - 1) * 4);
1528 dprintk("%s: Return\n", __func__);
1531 static struct pnfs_layoutdriver_type flexfilelayout_type = {
1532 .id = LAYOUT_FLEX_FILES,
1533 .name = "LAYOUT_FLEX_FILES",
1534 .owner = THIS_MODULE,
1535 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
1536 .free_layout_hdr = ff_layout_free_layout_hdr,
1537 .alloc_lseg = ff_layout_alloc_lseg,
1538 .free_lseg = ff_layout_free_lseg,
1539 .pg_read_ops = &ff_layout_pg_read_ops,
1540 .pg_write_ops = &ff_layout_pg_write_ops,
1541 .get_ds_info = ff_layout_get_ds_info,
1542 .free_deviceid_node = ff_layout_free_deviceid_node,
1543 .mark_request_commit = ff_layout_mark_request_commit,
1544 .clear_request_commit = pnfs_generic_clear_request_commit,
1545 .scan_commit_lists = pnfs_generic_scan_commit_lists,
1546 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
1547 .commit_pagelist = ff_layout_commit_pagelist,
1548 .read_pagelist = ff_layout_read_pagelist,
1549 .write_pagelist = ff_layout_write_pagelist,
1550 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
1551 .encode_layoutreturn = ff_layout_encode_layoutreturn,
1554 static int __init nfs4flexfilelayout_init(void)
1556 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
1558 return pnfs_register_layoutdriver(&flexfilelayout_type);
1561 static void __exit nfs4flexfilelayout_exit(void)
1563 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
1565 pnfs_unregister_layoutdriver(&flexfilelayout_type);
1568 MODULE_ALIAS("nfs-layouttype4-4");
1570 MODULE_LICENSE("GPL");
1571 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
1573 module_init(nfs4flexfilelayout_init);
1574 module_exit(nfs4flexfilelayout_exit);