// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_xdr.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback encode/decode procedures
 */
#include <linux/kernel.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/bc_xprt.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "internal.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define CB_OP_TAGLEN_MAXSZ		(512)
#define CB_OP_HDR_RES_MAXSZ		(2 * 4)			// opcode, status
#define CB_OP_GETATTR_BITMAP_MAXSZ	(4 * 4)			// bitmap length, 3 bitmaps
#define CB_OP_GETATTR_RES_MAXSZ		(CB_OP_HDR_RES_MAXSZ + \
					 CB_OP_GETATTR_BITMAP_MAXSZ + \
					 /* change, size, ctime, mtime */ \
					 (2 + 2 + 3 + 3) * 4)
#define CB_OP_RECALL_RES_MAXSZ		(CB_OP_HDR_RES_MAXSZ)

#if defined(CONFIG_NFS_V4_1)
#define CB_OP_LAYOUTRECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
#define CB_OP_DEVICENOTIFY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
#define CB_OP_SEQUENCE_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
					 NFS4_MAX_SESSIONID_LEN + \
					 (1 + 3) * 4)		// seqid, 3 slotids
#define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
#define CB_OP_RECALLSLOT_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
#define CB_OP_NOTIFY_LOCK_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
#define CB_OP_OFFLOAD_RES_MAXSZ		(CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_2 */

#define NFSDBG_FACILITY NFSDBG_CALLBACK

/* Internal error code */
#define NFS4ERR_RESOURCE_HDR	11050

struct callback_op {
	__be32 (*process_op)(void *, void *, struct cb_process_state *);
	__be32 (*decode_args)(struct svc_rqst *, struct xdr_stream *, void *);
	__be32 (*encode_res)(struct svc_rqst *, struct xdr_stream *,
			const void *);
	long res_maxsize;
};

static struct callback_op callback_ops[];

static __be32 nfs4_callback_null(struct svc_rqst *rqstp)
{
	return htonl(NFS4_OK);
}

/*
 * svc_process_common() looks for an XDR encoder to know when
 * not to drop a Reply.
 */
static bool nfs4_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	return true;
}

static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len,
		const char **str, size_t maxlen)
{
	ssize_t err;

	err = xdr_stream_decode_opaque_inline(xdr, (void **)str, maxlen);
	if (err < 0)
		return cpu_to_be32(NFS4ERR_RESOURCE);
	*len = err;
	return 0;
}

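/*
 * Decode an NFSv4 filehandle: a 32-bit length followed by the opaque
 * handle bytes. Handles larger than NFS4_FHSIZE are rejected and the
 * unused tail of fh->data is cleared.
 */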
static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	fh->size = ntohl(*p);
	if (fh->size > NFS4_FHSIZE)
		return htonl(NFS4ERR_BADHANDLE);
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	memcpy(&fh->data[0], p, fh->size);
	memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size);
	return 0;
}

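/*
 * Decode an attribute bitmap. Only the first two words are stored; any
 * additional words advertised by the sender are consumed and ignored.
 */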
static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
{
	__be32 *p;
	unsigned int attrlen;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	attrlen = ntohl(*p);
	p = xdr_inline_decode(xdr, attrlen << 2);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	if (likely(attrlen > 0))
		bitmap[0] = ntohl(*p++);
	if (attrlen > 1)
		bitmap[1] = ntohl(*p);
	return 0;
}

static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	return 0;
}

static __be32 decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	stateid->type = NFS4_DELEGATION_STATEID_TYPE;
	return decode_stateid(xdr, stateid);
}

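/*
 * Decode the CB_COMPOUND header: tag, minor version, callback_ident
 * (only meaningful for NFSv4.0) and the count of operations that follow.
 */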
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
	__be32 *p;
	__be32 status;

	status = decode_string(xdr, &hdr->taglen, &hdr->tag, CB_OP_TAGLEN_MAXSZ);
	if (unlikely(status != 0))
		return status;
	p = xdr_inline_decode(xdr, 12);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	hdr->minorversion = ntohl(*p++);
	/* Check for minor version support */
	if (hdr->minorversion <= NFS4_MAX_MINOR_VERSION) {
		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 and v4.2 */
	} else {
		pr_warn_ratelimited("NFS: %s: NFSv4 server callback with "
			"illegal minor version %u!\n",
			__func__, hdr->minorversion);
		return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
	}
	hdr->nops = ntohl(*p);
	return 0;
}

static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
{
	__be32 *p;
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE_HDR);
	*op = ntohl(*p);
	return 0;
}

static __be32 decode_getattr_args(struct svc_rqst *rqstp,
		struct xdr_stream *xdr, void *argp)
{
	struct cb_getattrargs *args = argp;
	__be32 status;

	status = decode_fh(xdr, &args->fh);
	if (unlikely(status != 0))
		return status;
	return decode_bitmap(xdr, args->bitmap);
}

static __be32 decode_recall_args(struct svc_rqst *rqstp,
		struct xdr_stream *xdr, void *argp)
{
	struct cb_recallargs *args = argp;
	__be32 *p;
	__be32 status;

	status = decode_delegation_stateid(xdr, &args->stateid);
	if (unlikely(status != 0))
		return status;
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	args->truncate = ntohl(*p);
	return decode_fh(xdr, &args->fh);
}

#if defined(CONFIG_NFS_V4_1)
static __be32 decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	stateid->type = NFS4_LAYOUT_STATEID_TYPE;
	return decode_stateid(xdr, stateid);
}

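/*
 * Decode CB_LAYOUTRECALL arguments. The recall may target a single file
 * (RETURN_FILE: filehandle, range and layout stateid follow), an entire
 * fsid (RETURN_FSID) or everything (RETURN_ALL); any other recall type
 * is rejected as bad XDR.
 */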
static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
				       struct xdr_stream *xdr, void *argp)
{
	struct cb_layoutrecallargs *args = argp;
	uint32_t iomode;
	__be32 *p;
	__be32 status;

	p = xdr_inline_decode(xdr, 4 * sizeof(uint32_t));
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);

	args->cbl_layout_type = ntohl(*p++);
	/* Despite the spec's xdr, iomode really belongs in the FILE switch,
	 * as it is unusable and ignored with the other types.
	 */
	iomode = ntohl(*p++);
	args->cbl_layoutchanged = ntohl(*p++);
	args->cbl_recall_type = ntohl(*p++);

	if (args->cbl_recall_type == RETURN_FILE) {
		args->cbl_range.iomode = iomode;
		status = decode_fh(xdr, &args->cbl_fh);
		if (unlikely(status != 0))
			return status;

		p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
		if (unlikely(p == NULL))
			return htonl(NFS4ERR_BADXDR);
		p = xdr_decode_hyper(p, &args->cbl_range.offset);
		p = xdr_decode_hyper(p, &args->cbl_range.length);
		return decode_layout_stateid(xdr, &args->cbl_stateid);
	} else if (args->cbl_recall_type == RETURN_FSID) {
		p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
		if (unlikely(p == NULL))
			return htonl(NFS4ERR_BADXDR);
		p = xdr_decode_hyper(p, &args->cbl_fsid.major);
		p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
	} else if (args->cbl_recall_type != RETURN_ALL)
		return htonl(NFS4ERR_BADXDR);

	return 0;
}

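/*
 * Decode CB_NOTIFY_DEVICEID arguments: a counted array of device
 * notifications. Each entry must carry a single bitmap word and be
 * either a DEVICEID4_CHANGE or DEVICEID4_DELETE notification with the
 * expected opaque length; anything else fails with NFS4ERR_INVAL.
 */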
static
__be32 decode_devicenotify_args(struct svc_rqst *rqstp,
				struct xdr_stream *xdr,
				void *argp)
{
	struct cb_devicenotifyargs *args = argp;
	uint32_t tmp, n, i;
	__be32 *p;
	__be32 status = 0;

	/* Num of device notifications */
	p = xdr_inline_decode(xdr, sizeof(uint32_t));
	if (unlikely(p == NULL)) {
		status = htonl(NFS4ERR_BADXDR);
		goto out;
	}
	n = ntohl(*p++);
	if (n == 0)
		goto out;

	args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
	if (!args->devs) {
		status = htonl(NFS4ERR_DELAY);
		goto out;
	}

	/* Decode each dev notification */
	for (i = 0; i < n; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		p = xdr_inline_decode(xdr, (4 * sizeof(uint32_t)) +
				      NFS4_DEVICEID4_SIZE);
		if (unlikely(p == NULL)) {
			status = htonl(NFS4ERR_BADXDR);
			goto err;
		}

		tmp = ntohl(*p++);	/* bitmap size */
		if (tmp != 1) {
			status = htonl(NFS4ERR_INVAL);
			goto err;
		}
		dev->cbd_notify_type = ntohl(*p++);
		if (dev->cbd_notify_type != NOTIFY_DEVICEID4_CHANGE &&
		    dev->cbd_notify_type != NOTIFY_DEVICEID4_DELETE) {
			status = htonl(NFS4ERR_INVAL);
			goto err;
		}

		tmp = ntohl(*p++);	/* opaque size */
		if (((dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) &&
		     (tmp != NFS4_DEVICEID4_SIZE + 8)) ||
		    ((dev->cbd_notify_type == NOTIFY_DEVICEID4_DELETE) &&
		     (tmp != NFS4_DEVICEID4_SIZE + 4))) {
			status = htonl(NFS4ERR_INVAL);
			goto err;
		}
		dev->cbd_layout_type = ntohl(*p++);
		memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE);
		p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

		if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) {
			p = xdr_inline_decode(xdr, sizeof(uint32_t));
			if (unlikely(p == NULL)) {
				status = htonl(NFS4ERR_BADXDR);
				goto err;
			}
			dev->cbd_immediate = ntohl(*p++);
		} else {
			dev->cbd_immediate = 0;
		}

		dprintk("%s: type %d layout 0x%x immediate %d\n",
			__func__, dev->cbd_notify_type, dev->cbd_layout_type,
			dev->cbd_immediate);
	}
	args->ndevs = n;
	dprintk("%s: ndevs %d\n", __func__, args->ndevs);
	return 0;
err:
	kfree(args->devs);
out:
	args->devs = NULL;
	args->ndevs = 0;
	dprintk("%s: status %d ndevs %d\n",
		__func__, ntohl(status), args->ndevs);
	return status;
}

static __be32 decode_sessionid(struct xdr_stream *xdr,
			       struct nfs4_sessionid *sid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);

	memcpy(sid->data, p, NFS4_MAX_SESSIONID_LEN);
	return 0;
}

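/*
 * Decode one referring call list for CB_SEQUENCE: a session id followed
 * by a counted array of (sequenceid, slotid) pairs.
 */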
static __be32 decode_rc_list(struct xdr_stream *xdr,
			     struct referring_call_list *rc_list)
{
	__be32 *p;
	int i;
	__be32 status;

	status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
	if (status)
		goto out;

	status = htonl(NFS4ERR_RESOURCE);
	p = xdr_inline_decode(xdr, sizeof(uint32_t));
	if (unlikely(p == NULL))
		goto out;

	rc_list->rcl_nrefcalls = ntohl(*p++);
	if (rc_list->rcl_nrefcalls) {
		p = xdr_inline_decode(xdr,
			rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
		if (unlikely(p == NULL))
			goto out;
		rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls,
						      sizeof(*rc_list->rcl_refcalls),
						      GFP_KERNEL);
		if (unlikely(rc_list->rcl_refcalls == NULL))
			goto out;
		for (i = 0; i < rc_list->rcl_nrefcalls; i++) {
			rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
			rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
		}
	}
	status = 0;

out:
	return status;
}

static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
				      struct xdr_stream *xdr,
				      void *argp)
{
	struct cb_sequenceargs *args = argp;
	__be32 *p;
	int i;
	__be32 status;

	status = decode_sessionid(xdr, &args->csa_sessionid);
	if (status)
		return status;

	p = xdr_inline_decode(xdr, 5 * sizeof(uint32_t));
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);

	args->csa_addr = svc_addr(rqstp);
	args->csa_sequenceid = ntohl(*p++);
	args->csa_slotid = ntohl(*p++);
	args->csa_highestslotid = ntohl(*p++);
	args->csa_cachethis = ntohl(*p++);
	args->csa_nrclists = ntohl(*p++);
	args->csa_rclists = NULL;
	if (args->csa_nrclists) {
		args->csa_rclists = kmalloc_array(args->csa_nrclists,
						  sizeof(*args->csa_rclists),
						  GFP_KERNEL);
		if (unlikely(args->csa_rclists == NULL))
			return htonl(NFS4ERR_RESOURCE);

		for (i = 0; i < args->csa_nrclists; i++) {
			status = decode_rc_list(xdr, &args->csa_rclists[i]);
			if (status) {
				args->csa_nrclists = i;
				goto out_free;
			}
		}
	}
	return 0;

out_free:
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);
	return status;
}

static __be32 decode_recallany_args(struct svc_rqst *rqstp,
				    struct xdr_stream *xdr,
				    void *argp)
{
	struct cb_recallanyargs *args = argp;
	uint32_t bitmap[2];
	__be32 *p, status;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);
	args->craa_objs_to_keep = ntohl(*p++);
	status = decode_bitmap(xdr, bitmap);
	if (unlikely(status))
		return status;
	args->craa_type_mask = bitmap[0];

	return 0;
}

static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
				     struct xdr_stream *xdr,
				     void *argp)
{
	struct cb_recallslotargs *args = argp;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);
	args->crsa_target_highest_slotid = ntohl(*p++);
	return 0;
}

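/*
 * Decode the lock owner carried by CB_NOTIFY_LOCK: the clientid plus an
 * opaque owner. The owner body is only interpreted when its length
 * matches the lockowner format this client generates; otherwise the
 * notification is marked as not valid.
 */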
static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_args *args)
{
	__be32		*p;
	unsigned int	len;

	p = xdr_inline_decode(xdr, 12);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);

	p = xdr_decode_hyper(p, &args->cbnl_owner.clientid);
	len = be32_to_cpu(*p);

	p = xdr_inline_decode(xdr, len);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);

	/* Only try to decode if the length is right */
	if (len == 20) {
		p += 2;	/* skip "lock id:" */
		args->cbnl_owner.s_dev = be32_to_cpu(*p++);
		xdr_decode_hyper(p, &args->cbnl_owner.id);
		args->cbnl_valid = true;
	} else {
		args->cbnl_owner.s_dev = 0;
		args->cbnl_owner.id = 0;
		args->cbnl_valid = false;
	}
	return 0;
}

static __be32 decode_notify_lock_args(struct svc_rqst *rqstp,
		struct xdr_stream *xdr, void *argp)
{
	struct cb_notify_lock_args *args = argp;
	__be32 status;

	status = decode_fh(xdr, &args->cbnl_fh);
	if (unlikely(status != 0))
		return status;
	return decode_lockowner(xdr, args);
}

#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static __be32 decode_write_response(struct xdr_stream *xdr,
				    struct cb_offloadargs *args)
{
	__be32	*p;

	/* skip the always zero field */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out;
	p++;

	/* decode count, stable_how, verifier */
	p = xdr_inline_decode(xdr, 8 + 4);
	if (unlikely(!p))
		goto out;
	p = xdr_decode_hyper(p, &args->wr_count);
	args->wr_writeverf.committed = be32_to_cpup(p);
	p = xdr_inline_decode(xdr, NFS4_VERIFIER_SIZE);
	if (likely(p)) {
		memcpy(&args->wr_writeverf.verifier.data[0], p,
			NFS4_VERIFIER_SIZE);
		return 0;
	}
out:
	return htonl(NFS4ERR_RESOURCE);
}

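/*
 * Decode CB_OFFLOAD arguments: filehandle, stateid and the offload
 * status. On success the embedded write response (count, stable_how,
 * verifier) follows; on error only the count of bytes copied does.
 */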
static __be32 decode_offload_args(struct svc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  void *data)
{
	struct cb_offloadargs *args = data;
	__be32 *p;
	__be32 status;

	/* decode fh */
	status = decode_fh(xdr, &args->coa_fh);
	if (unlikely(status != 0))
		return status;

	/* decode stateid */
	status = decode_stateid(xdr, &args->coa_stateid);
	if (unlikely(status != 0))
		return status;

	/* decode status */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out;
	args->error = ntohl(*p++);
	if (!args->error) {
		status = decode_write_response(xdr, args);
		if (unlikely(status != 0))
			return status;
	} else {
		p = xdr_inline_decode(xdr, 8);
		if (unlikely(!p))
			goto out;
		p = xdr_decode_hyper(p, &args->wr_count);
	}
	return 0;
out:
	return htonl(NFS4ERR_RESOURCE);
}
#endif /* CONFIG_NFS_V4_2 */
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
	if (unlikely(xdr_stream_encode_opaque(xdr, str, len) < 0))
		return cpu_to_be32(NFS4ERR_RESOURCE);
	return 0;
}

static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, size_t sz)
{
	if (xdr_stream_encode_uint32_array(xdr, bitmap, sz) < 0)
		return cpu_to_be32(NFS4ERR_RESOURCE);
	return 0;
}

static __be32 encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change)
{
	__be32 *p;

	if (!(bitmap[0] & FATTR4_WORD0_CHANGE))
		return 0;
	p = xdr_reserve_space(xdr, 8);
	if (unlikely(!p))
		return htonl(NFS4ERR_RESOURCE);
	p = xdr_encode_hyper(p, change);
	return 0;
}

static __be32 encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size)
{
	__be32 *p;

	if (!(bitmap[0] & FATTR4_WORD0_SIZE))
		return 0;
	p = xdr_reserve_space(xdr, 8);
	if (unlikely(!p))
		return htonl(NFS4ERR_RESOURCE);
	p = xdr_encode_hyper(p, size);
	return 0;
}

static __be32 encode_attr_time(struct xdr_stream *xdr, const struct timespec64 *time)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	if (unlikely(!p))
		return htonl(NFS4ERR_RESOURCE);
	p = xdr_encode_hyper(p, time->tv_sec);
	*p = htonl(time->tv_nsec);
	return 0;
}

static __be32 encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec64 *time)
{
	if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA))
		return 0;
	return encode_attr_time(xdr, time);
}

static __be32 encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec64 *time)
{
	if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY))
		return 0;
	return encode_attr_time(xdr, time);
}

static __be32 encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr)
{
	__be32 status;

	hdr->status = xdr_reserve_space(xdr, 4);
	if (unlikely(hdr->status == NULL))
		return htonl(NFS4ERR_RESOURCE);
	status = encode_string(xdr, hdr->taglen, hdr->tag);
	if (unlikely(status != 0))
		return status;
	hdr->nops = xdr_reserve_space(xdr, 4);
	if (unlikely(hdr->nops == NULL))
		return htonl(NFS4ERR_RESOURCE);
	return 0;
}

static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE_HDR);
	*p++ = htonl(op);
	*p = res;
	return 0;
}

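/*
 * Encode the CB_GETATTR reply: the attribute bitmap, then a length word
 * reserved up front (savep) and patched afterwards with the number of
 * attribute bytes actually written.
 */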
static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,
				 const void *resp)
{
	const struct cb_getattrres *res = resp;
	__be32 *savep = NULL;
	__be32 status = res->status;

	if (unlikely(status != 0))
		goto out;
	status = encode_attr_bitmap(xdr, res->bitmap, ARRAY_SIZE(res->bitmap));
	if (unlikely(status != 0))
		goto out;
	status = cpu_to_be32(NFS4ERR_RESOURCE);
	savep = xdr_reserve_space(xdr, sizeof(*savep));
	if (unlikely(!savep))
		goto out;
	status = encode_attr_change(xdr, res->bitmap, res->change_attr);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_size(xdr, res->bitmap, res->size);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_ctime(xdr, res->bitmap, &res->ctime);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
	*savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
out:
	return status;
}

#if defined(CONFIG_NFS_V4_1)

static __be32 encode_sessionid(struct xdr_stream *xdr,
			       const struct nfs4_sessionid *sid)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);

	memcpy(p, sid, NFS4_MAX_SESSIONID_LEN);
	return 0;
}

static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
				     struct xdr_stream *xdr,
				     const void *resp)
{
	const struct cb_sequenceres *res = resp;
	__be32 *p;
	__be32 status = res->csr_status;

	if (unlikely(status != 0))
		goto out;

	status = encode_sessionid(xdr, &res->csr_sessionid);
	if (status)
		goto out;

	p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);

	*p++ = htonl(res->csr_sequenceid);
	*p++ = htonl(res->csr_slotid);
	*p++ = htonl(res->csr_highestslotid);
	*p++ = htonl(res->csr_target_highestslotid);
out:
	return status;
}

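/*
 * Validate an NFSv4.1 callback operation: CB_SEQUENCE must be the first
 * op of the compound and every other op must come after it; only the
 * operations wired up in callback_ops[] are accepted.
 */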
static __be32
preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
{
	if (op_nr == OP_CB_SEQUENCE) {
		if (nop != 0)
			return htonl(NFS4ERR_SEQUENCE_POS);
	} else {
		if (nop == 0)
			return htonl(NFS4ERR_OP_NOT_IN_SESSION);
	}

	switch (op_nr) {
	case OP_CB_GETATTR:
	case OP_CB_RECALL:
	case OP_CB_SEQUENCE:
	case OP_CB_RECALL_ANY:
	case OP_CB_RECALL_SLOT:
	case OP_CB_LAYOUTRECALL:
	case OP_CB_NOTIFY_DEVICEID:
	case OP_CB_NOTIFY_LOCK:
		*op = &callback_ops[op_nr];
		break;

	case OP_CB_NOTIFY:
	case OP_CB_PUSH_DELEG:
	case OP_CB_RECALLABLE_OBJ_AVAIL:
	case OP_CB_WANTS_CANCELLED:
		return htonl(NFS4ERR_NOTSUPP);

	default:
		return htonl(NFS4ERR_OP_ILLEGAL);
	}

	return htonl(NFS_OK);
}

static void nfs4_callback_free_slot(struct nfs4_session *session,
		struct nfs4_slot *slot)
{
	struct nfs4_slot_table *tbl = &session->bc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/*
	 * Let the state manager know callback processing done.
	 * A single slot, so highest used slotid is either 0 or -1
	 */
	nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs4_cb_free_slot(struct cb_process_state *cps)
{
	if (cps->slot) {
		nfs4_callback_free_slot(cps->clp->cl_session, cps->slot);
		cps->slot = NULL;
	}
}

#else /* CONFIG_NFS_V4_1 */

static __be32
preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
{
	return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}

static void nfs4_cb_free_slot(struct cb_process_state *cps)
{
}
#endif /* CONFIG_NFS_V4_1 */

#ifdef CONFIG_NFS_V4_2
static __be32
preprocess_nfs42_op(int nop, unsigned int op_nr, struct callback_op **op)
{
	__be32 status = preprocess_nfs41_op(nop, op_nr, op);
	if (status != htonl(NFS4ERR_OP_ILLEGAL))
		return status;

	if (op_nr == OP_CB_OFFLOAD) {
		*op = &callback_ops[op_nr];
		return htonl(NFS_OK);
	} else
		return htonl(NFS4ERR_NOTSUPP);
	return htonl(NFS4ERR_OP_ILLEGAL);
}
#else /* CONFIG_NFS_V4_2 */
static __be32
preprocess_nfs42_op(int nop, unsigned int op_nr, struct callback_op **op)
{
	return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
#endif /* CONFIG_NFS_V4_2 */

static __be32
preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
{
	switch (op_nr) {
	case OP_CB_GETATTR:
	case OP_CB_RECALL:
		*op = &callback_ops[op_nr];
		break;
	default:
		return htonl(NFS4ERR_OP_ILLEGAL);
	}

	return htonl(NFS_OK);
}

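/*
 * Process a single callback operation: decode its opcode, let the
 * minorversion-specific preprocessing pick the handler, decode the
 * arguments, run the op, then encode the per-op result header and,
 * where the op defines one, the result body.
 */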
static __be32 process_op(int nop, struct svc_rqst *rqstp,
		struct cb_process_state *cps)
{
	struct xdr_stream *xdr_out = &rqstp->rq_res_stream;
	struct callback_op *op = &callback_ops[0];
	unsigned int op_nr;
	__be32 status;
	long maxlen;
	__be32 res;

	status = decode_op_hdr(&rqstp->rq_arg_stream, &op_nr);
	if (unlikely(status))
		return status;

	switch (cps->minorversion) {
	case 0:
		status = preprocess_nfs4_op(op_nr, &op);
		break;
	case 1:
		status = preprocess_nfs41_op(nop, op_nr, &op);
		break;
	case 2:
		status = preprocess_nfs42_op(nop, op_nr, &op);
		break;
	default:
		status = htonl(NFS4ERR_MINOR_VERS_MISMATCH);
	}

	if (status == htonl(NFS4ERR_OP_ILLEGAL))
		op_nr = OP_CB_ILLEGAL;
	if (status)
		goto encode_hdr;

	if (cps->drc_status) {
		status = cps->drc_status;
		goto encode_hdr;
	}

	maxlen = xdr_out->end - xdr_out->p;
	if (maxlen > 0 && maxlen < PAGE_SIZE) {
		status = op->decode_args(rqstp, &rqstp->rq_arg_stream,
					 rqstp->rq_argp);
		if (likely(status == 0))
			status = op->process_op(rqstp->rq_argp, rqstp->rq_resp,
						cps);
	} else
		status = htonl(NFS4ERR_RESOURCE);

encode_hdr:
	res = encode_op_hdr(xdr_out, op_nr, status);
	if (unlikely(res))
		return res;
	if (op->encode_res != NULL && status == 0)
		status = op->encode_res(rqstp, xdr_out, rqstp->rq_resp);
	return status;
}

/*
 * Decode, process and encode a COMPOUND
 */
static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
{
	struct cb_compound_hdr_arg hdr_arg = { 0 };
	struct cb_compound_hdr_res hdr_res = { NULL };
	struct cb_process_state cps = {
		.drc_status = 0,
		.clp = NULL,
		.net = SVC_NET(rqstp),
	};
	unsigned int nops = 0;
	__be32 status;

	status = decode_compound_hdr_arg(&rqstp->rq_arg_stream, &hdr_arg);
	if (status == htonl(NFS4ERR_RESOURCE))
		return rpc_garbage_args;

	if (hdr_arg.minorversion == 0) {
		cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
		if (!cps.clp) {
			trace_nfs_cb_no_clp(rqstp->rq_xid, hdr_arg.cb_ident);
			goto out_invalidcred;
		}
		if (!check_gss_callback_principal(cps.clp, rqstp)) {
			trace_nfs_cb_badprinc(rqstp->rq_xid, hdr_arg.cb_ident);
			nfs_put_client(cps.clp);
			goto out_invalidcred;
		}
	}

	cps.minorversion = hdr_arg.minorversion;
	hdr_res.taglen = hdr_arg.taglen;
	hdr_res.tag = hdr_arg.tag;
	if (encode_compound_hdr_res(&rqstp->rq_res_stream, &hdr_res) != 0) {
		if (cps.clp)
			nfs_put_client(cps.clp);
		return rpc_system_err;
	}
	while (status == 0 && nops != hdr_arg.nops) {
		status = process_op(nops, rqstp, &cps);
		nops++;
	}

	/* Buffer overflow in decode_op_hdr or encode_op_hdr. Return
	 * resource error in cb_compound status without returning op */
	if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
		status = htonl(NFS4ERR_RESOURCE);
		nops--;
	}

	if (svc_is_backchannel(rqstp) && cps.clp) {
		rqstp->bc_to_initval = cps.clp->cl_rpcclient->cl_timeout->to_initval;
		rqstp->bc_to_retries = cps.clp->cl_rpcclient->cl_timeout->to_retries;
	}

	*hdr_res.status = status;
	*hdr_res.nops = htonl(nops);
	nfs4_cb_free_slot(&cps);
	nfs_put_client(cps.clp);
	return rpc_success;

out_invalidcred:
	pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n");
	rqstp->rq_auth_stat = rpc_autherr_badcred;
	return rpc_success;
}

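/*
 * Common dispatcher for both callback procedures: run the procedure and
 * store its accept status where the RPC layer expects to find it.
 */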
static int
nfs_callback_dispatch(struct svc_rqst *rqstp)
{
	const struct svc_procedure *procp = rqstp->rq_procinfo;

	*rqstp->rq_accept_statp = procp->pc_func(rqstp);
	return 1;
}

/*
 * Define NFS4 callback COMPOUND ops.
 */
static struct callback_op callback_ops[] = {
	[0] = {
		.res_maxsize = CB_OP_HDR_RES_MAXSZ,
	},
	[OP_CB_GETATTR] = {
		.process_op = nfs4_callback_getattr,
		.decode_args = decode_getattr_args,
		.encode_res = encode_getattr_res,
		.res_maxsize = CB_OP_GETATTR_RES_MAXSZ,
	},
	[OP_CB_RECALL] = {
		.process_op = nfs4_callback_recall,
		.decode_args = decode_recall_args,
		.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
	},
#if defined(CONFIG_NFS_V4_1)
	[OP_CB_LAYOUTRECALL] = {
		.process_op = nfs4_callback_layoutrecall,
		.decode_args = decode_layoutrecall_args,
		.res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
	},
	[OP_CB_NOTIFY_DEVICEID] = {
		.process_op = nfs4_callback_devicenotify,
		.decode_args = decode_devicenotify_args,
		.res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ,
	},
	[OP_CB_SEQUENCE] = {
		.process_op = nfs4_callback_sequence,
		.decode_args = decode_cb_sequence_args,
		.encode_res = encode_cb_sequence_res,
		.res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
	},
	[OP_CB_RECALL_ANY] = {
		.process_op = nfs4_callback_recallany,
		.decode_args = decode_recallany_args,
		.res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
	},
	[OP_CB_RECALL_SLOT] = {
		.process_op = nfs4_callback_recallslot,
		.decode_args = decode_recallslot_args,
		.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
	},
	[OP_CB_NOTIFY_LOCK] = {
		.process_op = nfs4_callback_notify_lock,
		.decode_args = decode_notify_lock_args,
		.res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ,
	},
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
	[OP_CB_OFFLOAD] = {
		.process_op = nfs4_callback_offload,
		.decode_args = decode_offload_args,
		.res_maxsize = CB_OP_OFFLOAD_RES_MAXSZ,
	},
#endif /* CONFIG_NFS_V4_2 */
};

/*
 * Define NFS4 callback procedures
 */
static const struct svc_procedure nfs4_callback_procedures1[] = {
	[CB_NULL] = {
		.pc_func = nfs4_callback_null,
		.pc_encode = nfs4_encode_void,
		.pc_xdrressize = 1,
		.pc_name = "NULL",
	},
	[CB_COMPOUND] = {
		.pc_func = nfs4_callback_compound,
		.pc_encode = nfs4_encode_void,
		.pc_argsize = 256,
		.pc_argzero = 256,
		.pc_ressize = 256,
		.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
		.pc_name = "COMPOUND",
	}
};

static DEFINE_PER_CPU_ALIGNED(unsigned long,
			      nfs4_callback_count1[ARRAY_SIZE(nfs4_callback_procedures1)]);
const struct svc_version nfs4_callback_version1 = {
	.vs_vers = 1,
	.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
	.vs_proc = nfs4_callback_procedures1,
	.vs_count = nfs4_callback_count1,
	.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
	.vs_dispatch = nfs_callback_dispatch,
	.vs_hidden = true,
	.vs_need_cong_ctrl = true,
};

static DEFINE_PER_CPU_ALIGNED(unsigned long,
			      nfs4_callback_count4[ARRAY_SIZE(nfs4_callback_procedures1)]);
const struct svc_version nfs4_callback_version4 = {
	.vs_vers = 4,
	.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
	.vs_proc = nfs4_callback_procedures1,
	.vs_count = nfs4_callback_count4,
	.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
	.vs_dispatch = nfs_callback_dispatch,
	.vs_hidden = true,
	.vs_need_cong_ctrl = true,
};