SUNRPC: handle GSS AUTH pipes by network namespace aware routines
[linux-2.6-block.git] / net / sunrpc / clnt.c
CommitLineData
1da177e4 1/*
55aa4f58 2 * linux/net/sunrpc/clnt.c
1da177e4
LT
3 *
4 * This file contains the high-level RPC interface.
5 * It is modeled as a finite state machine to support both synchronous
6 * and asynchronous requests.
7 *
8 * - RPC header generation and argument serialization.
9 * - Credential refresh.
10 * - TCP connect handling.
11 * - Retry of operation when it is suspected the operation failed because
12 * of uid squashing on the server, or when the credentials were stale
13 * and need to be refreshed, or when a packet was damaged in transit.
14 * This may be have to be moved to the VFS layer.
15 *
1da177e4
LT
16 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
17 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
18 */
19
20#include <asm/system.h>
21
22#include <linux/module.h>
23#include <linux/types.h>
cb3997b5 24#include <linux/kallsyms.h>
1da177e4 25#include <linux/mm.h>
23ac6581
TM
26#include <linux/namei.h>
27#include <linux/mount.h>
1da177e4 28#include <linux/slab.h>
1da177e4 29#include <linux/utsname.h>
11c556b3 30#include <linux/workqueue.h>
176e21ee 31#include <linux/in.h>
510deb0d 32#include <linux/in6.h>
176e21ee 33#include <linux/un.h>
1da177e4
LT
34
35#include <linux/sunrpc/clnt.h>
1da177e4 36#include <linux/sunrpc/rpc_pipe_fs.h>
11c556b3 37#include <linux/sunrpc/metrics.h>
55ae1aab 38#include <linux/sunrpc/bc_xprt.h>
1da177e4 39
55ae1aab 40#include "sunrpc.h"
1da177e4 41
1da177e4
LT
42#ifdef RPC_DEBUG
43# define RPCDBG_FACILITY RPCDBG_CALL
44#endif
45
46121cf7
CL
46#define dprint_status(t) \
47 dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \
0dc47877 48 __func__, t->tk_status)
46121cf7 49
188fef11
TM
50/*
51 * All RPC clients are linked into this list
52 */
53static LIST_HEAD(all_clients);
54static DEFINE_SPINLOCK(rpc_client_lock);
55
1da177e4
LT
56static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
57
58
59static void call_start(struct rpc_task *task);
60static void call_reserve(struct rpc_task *task);
61static void call_reserveresult(struct rpc_task *task);
62static void call_allocate(struct rpc_task *task);
1da177e4
LT
63static void call_decode(struct rpc_task *task);
64static void call_bind(struct rpc_task *task);
da351878 65static void call_bind_status(struct rpc_task *task);
1da177e4 66static void call_transmit(struct rpc_task *task);
9e00abc3 67#if defined(CONFIG_SUNRPC_BACKCHANNEL)
55ae1aab 68static void call_bc_transmit(struct rpc_task *task);
9e00abc3 69#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1da177e4 70static void call_status(struct rpc_task *task);
940e3318 71static void call_transmit_status(struct rpc_task *task);
1da177e4
LT
72static void call_refresh(struct rpc_task *task);
73static void call_refreshresult(struct rpc_task *task);
74static void call_timeout(struct rpc_task *task);
75static void call_connect(struct rpc_task *task);
76static void call_connect_status(struct rpc_task *task);
1da177e4 77
b0e1c57e
CL
78static __be32 *rpc_encode_header(struct rpc_task *task);
79static __be32 *rpc_verify_header(struct rpc_task *task);
caabea8a 80static int rpc_ping(struct rpc_clnt *clnt);
64c91a1f 81
188fef11
TM
/* Add @clnt to the global all_clients list, under rpc_client_lock. */
static void rpc_register_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_add(&clnt->cl_clients, &all_clients);
	spin_unlock(&rpc_client_lock);
}
88
/* Remove @clnt from the global all_clients list, under rpc_client_lock. */
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	spin_lock(&rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&rpc_client_lock);
}
1da177e4 95
/*
 * Tear down the client's rpc_pipefs directory entry (if any) and clear
 * the cached dentry.  Caller is responsible for holding a reference on
 * the pipefs superblock (see rpc_clnt_remove_pipedir()).
 */
static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	if (clnt->cl_path.dentry)
		rpc_remove_client_dir(clnt->cl_path.dentry);
	clnt->cl_path.dentry = NULL;
}
102
/*
 * Remove the client's pipefs directory for its network namespace.
 * The superblock reference is taken only around the dentry removal;
 * the mount reference (taken in rpc_setup_pipedir()) is dropped last,
 * and only if we actually held a dentry.
 */
static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct super_block *pipefs_sb;
	int put_mnt = 0;

	pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net);
	if (pipefs_sb) {
		if (clnt->cl_path.dentry)
			put_mnt = 1;
		__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(clnt->cl_xprt->xprt_net);
	}
	if (put_mnt)
		rpc_put_mount();
}
118
/*
 * Create a uniquely-named "clnt%x" directory for @clnt under @dir_name
 * on the given pipefs superblock.  Retries with a new id on -EEXIST.
 *
 * Returns the new dentry, NULL if @dir_name cannot be looked up, or an
 * ERR_PTR on any other creation failure.
 */
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
		struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;		/* monotonically increasing id shared by all clients */
	char name[15];
	struct qstr q = {
		.name = name,
	};
	struct dentry *dir, *dentry;
	int error;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL)
		return dir;
	for (;;) {
		q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		q.hash = full_name_hash(q.name, q.len);
		dentry = rpc_create_client_dir(dir, &q, clnt);
		if (!IS_ERR(dentry))
			break;
		error = PTR_ERR(dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry"
					" %s/%s, error %d\n",
					dir_name, name, error);
			break;	/* dentry is an ERR_PTR; caller checks IS_ERR() */
		}
	}
	dput(dir);
	return dentry;
}
151
/*
 * Set up @clnt's rpc_pipefs directory (clnt->cl_path).  A NULL @dir_name
 * means the program uses no pipefs directory and is not an error.
 * On success the client holds a reference on the pipefs mount.
 */
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	struct super_block *pipefs_sb;
	struct path path;

	clnt->cl_path.mnt = ERR_PTR(-ENOENT);
	clnt->cl_path.dentry = NULL;
	if (dir_name == NULL)
		return 0;

	path.mnt = rpc_get_mount();
	if (IS_ERR(path.mnt))
		return PTR_ERR(path.mnt);
	pipefs_sb = rpc_get_sb_net(clnt->cl_xprt->xprt_net);
	if (!pipefs_sb) {
		rpc_put_mount();
		return -ENOENT;
	}
	path.dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name);
	rpc_put_sb_net(clnt->cl_xprt->xprt_net);
	if (IS_ERR(path.dentry)) {
		rpc_put_mount();
		return PTR_ERR(path.dentry);
	}
	/* NOTE(review): rpc_setup_pipedir_sb() can also return NULL (lookup
	 * failure); that case keeps the mount reference with a NULL dentry —
	 * confirm this is intended. */
	clnt->cl_path = path;
	return 0;
}
180
/*
 * Allocate and initialise a new rpc_clnt for @xprt from @args.
 *
 * On success the client owns one reference on @xprt and on rpciod, and
 * has been added to the global client list.  On failure the transport
 * reference is consumed (xprt_put()) and an ERR_PTR is returned; the
 * goto labels unwind in strict reverse order of acquisition.
 */
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
	struct rpc_program *program = args->program;
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;
	int err;
	size_t len;

	/* sanity check the name before trying to print it */
	err = -EINVAL;
	len = strlen(args->servername);
	if (len > RPC_MAXNETNAMELEN)
		goto out_no_rpciod;
	len++;		/* account for the terminating NUL */

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;
	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;

	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = clnt;		/* not a clone: parent is self */

	/* Use the inline name buffer when it fits; otherwise kmalloc one
	 * and fall back to a truncated inline copy on allocation failure. */
	clnt->cl_server = clnt->cl_inline_name;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, args->servername, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	/* Unbound transport: enable automatic rpcbind lookups */
	if (!xprt_bound(clnt->cl_xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		clnt->cl_timeout = &clnt->cl_timeout_default;
	}

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
	clnt->cl_principal = NULL;
	if (args->client_name) {
		clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
		if (!clnt->cl_principal)
			goto out_no_principal;
	}

	atomic_set(&clnt->cl_count, 1);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(args->authflavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				args->authflavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(init_utsname()->nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
	rpc_register_client(clnt);
	return clnt;

out_no_auth:
	rpc_clnt_remove_pipedir(clnt);
out_no_path:
	kfree(clnt->cl_principal);
out_no_principal:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_put(xprt);
out_no_xprt:
	rpciod_down();
out_no_rpciod:
	return ERR_PTR(err);
}
302
/*
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.bc_xprt = args->bc_xprt,
	};
	char servername[48];

	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (args->servername == NULL) {
		struct sockaddr_un *sun =
			(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
			(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			snprintf(servername, sizeof(servername), "%s",
				 sun->sun_path);
			break;
		case AF_INET:
			/* %pI4/%pI6 are kernel printk extensions for IP addresses */
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		args->servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	/* rpc_new_client() consumes the xprt reference on failure */
	clnt = rpc_new_client(args, xprt);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
c2866763 401
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 *
 * The clone takes its own references on the transport, the parent client,
 * the auth handle and rpciod; per-clone state (task list, lock, metrics,
 * principal) is re-initialised rather than shared.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	int err = -ENOMEM;

	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	new->cl_parent = clnt;
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	INIT_LIST_HEAD(&new->cl_tasks);
	spin_lock_init(&new->cl_lock);
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	if (clnt->cl_principal) {
		/* don't share the parent's principal string */
		new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
		if (new->cl_principal == NULL)
			goto out_no_principal;
	}
	atomic_set(&new->cl_count, 1);
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	xprt_get(clnt->cl_xprt);
	atomic_inc(&clnt->cl_count);	/* clone pins its parent */
	rpc_register_client(new);
	rpciod_up();
	return new;
out_no_path:
	kfree(new->cl_principal);
out_no_principal:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC: %s: returned error %d\n", __func__, err);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);
1da177e4 452
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;


	if (list_empty(&clnt->cl_tasks))
		return;
	dprintk("RPC: killing all tasks for client %p\n", clnt);
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(rovr))
			continue;
		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			/* wake sleeping tasks so they notice the kill */
			if (RPC_IS_QUEUED(rovr))
				rpc_wake_up_queued_task(rovr->tk_waitqueue,
							rovr);
		}
	}
	spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
483
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.  Repeatedly kills remaining tasks and waits (1s at a
 * time) until the task list drains, then drops the caller's client
 * reference.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
1da177e4
LT
502
/*
 * Free an RPC client once its last reference is gone.  A clone drops
 * the reference it holds on its parent instead of freeing the shared
 * server-name buffer (which the parent owns).
 */
static void
rpc_free_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	rpc_clnt_remove_pipedir(clnt);
	if (clnt->cl_parent != clnt) {
		rpc_release_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	kfree(clnt->cl_principal);
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	rpciod_down();
	kfree(clnt);
}
527
/*
 * Free an RPC client's auth handle, then the client itself once the
 * temporary reference below is dropped.
 */
static void
rpc_free_auth(struct rpc_clnt *clnt)
{
	if (clnt->cl_auth == NULL) {
		rpc_free_client(clnt);
		return;
	}

	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 * release remaining GSS contexts. This mechanism ensures
	 * that it can do so safely.
	 */
	atomic_inc(&clnt->cl_count);	/* keep clnt alive across rpcauth_release() */
	rpcauth_release(clnt->cl_auth);
	clnt->cl_auth = NULL;
	if (atomic_dec_and_test(&clnt->cl_count))
		rpc_free_client(clnt);
}
550
/*
 * Release reference to the RPC client; frees auth and client when the
 * last reference goes away.  Also wakes rpc_shutdown_client() waiters
 * when the task list is empty.
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p)\n", clnt);

	if (list_empty(&clnt->cl_tasks))
		wake_up(&destroy_wait);
	if (atomic_dec_and_test(&clnt->cl_count))
		rpc_free_auth(clnt);
}
564
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      u32 vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	/* verify the server speaks the new program/version */
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
007e251f 603
/*
 * Detach @task from its client: unlink it from the client's task list
 * and drop the task's reference on the client.
 */
void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (clnt != NULL) {
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;

		rpc_release_client(clnt);
	}
}
618
/*
 * Bind @task to @clnt: take a client reference, inherit the soft-retry
 * setting, and add the task to the client's task list.  Any previous
 * client binding is released first.
 */
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (clnt != NULL) {
		rpc_task_release_client(task);
		task->tk_client = clnt;
		atomic_inc(&clnt->cl_count);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		/* Add to the client's list of all tasks */
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}
634
/*
 * Rebind @task to a (possibly NULL) new client.  Unlike
 * rpc_task_set_client(), this always releases the old binding, even
 * when @clnt is NULL.
 */
void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_release_client(task);
	rpc_task_set_client(task, clnt);
}
EXPORT_SYMBOL_GPL(rpc_task_reset_client);
641
642
/*
 * Copy the caller's rpc_message into the task, taking a reference on
 * the credential (if supplied).  A NULL @msg leaves the task's message
 * untouched.
 */
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	if (msg != NULL) {
		task->tk_msg.rpc_proc = msg->rpc_proc;
		task->tk_msg.rpc_argp = msg->rpc_argp;
		task->tk_msg.rpc_resp = msg->rpc_resp;
		if (msg->rpc_cred != NULL)
			task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
	}
}
654
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

/* Call ops used when the caller supplies none (e.g. rpc_call_sync()) */
static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
666
/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 *
 * Returns the task (with an extra reference held for the caller) or an
 * ERR_PTR if allocation fails.
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		goto out;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	/* extra reference so the caller may inspect the task after execution */
	atomic_inc(&task->tk_count);
	rpc_execute(task);
out:
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags (must not include RPC_TASK_ASYNC)
 *
 * Returns the task's final status.
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
1da177e4 719
/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 *
 * Returns 0 once the task has been launched; completion is reported
 * through @tk_ops.
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);	/* drop caller's reference; task runs on */
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
1da177e4 748
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 * @tk_ops: RPC call ops
 *
 * On allocation failure the pre-allocated backchannel request is freed.
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
				 const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		xprt_free_bc_request(req);
		goto out;
	}
	task->tk_rqstp = req;

	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
			xbufp->tail[0].iov_len;

	task->tk_action = call_bc_transmit;
	atomic_inc(&task->tk_count);
	BUG_ON(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

out:
	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
55ae1aab 793
/* Point @task at the start of the call FSM (see call_start()). */
void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
800
ed39440a
CL
801/**
802 * rpc_peeraddr - extract remote peer address from clnt's xprt
803 * @clnt: RPC client structure
804 * @buf: target buffer
65b6e42c 805 * @bufsize: length of target buffer
ed39440a
CL
806 *
807 * Returns the number of bytes that are actually in the stored address.
808 */
809size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
810{
811 size_t bytes;
812 struct rpc_xprt *xprt = clnt->cl_xprt;
813
814 bytes = sizeof(xprt->addr);
815 if (bytes > bufsize)
816 bytes = bufsize;
817 memcpy(buf, &clnt->cl_xprt->addr, bytes);
c4efcb1d 818 return xprt->addrlen;
ed39440a 819}
b86acd50 820EXPORT_SYMBOL_GPL(rpc_peeraddr);
ed39440a 821
f425eba4
CL
822/**
823 * rpc_peeraddr2str - return remote peer address in printable format
824 * @clnt: RPC client structure
825 * @format: address format
826 *
827 */
b454ae90
CL
828const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
829 enum rpc_display_format_t format)
f425eba4
CL
830{
831 struct rpc_xprt *xprt = clnt->cl_xprt;
7559c7a2
CL
832
833 if (xprt->address_strings[format] != NULL)
834 return xprt->address_strings[format];
835 else
836 return "unprintable";
f425eba4 837}
b86acd50 838EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
f425eba4 839
1da177e4
LT
840void
841rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
842{
843 struct rpc_xprt *xprt = clnt->cl_xprt;
470056c2
CL
844 if (xprt->ops->set_buffer_size)
845 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
1da177e4 846}
e8914c65 847EXPORT_SYMBOL_GPL(rpc_setbufsize);
1da177e4
LT
848
/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);
1da177e4 862
/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 * Only acts when the client is autobinding; clearing the bound flag
 * forces a fresh rpcbind lookup on the next call.
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		xprt_clear_bound(clnt->cl_xprt);
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);
35f5a422 874
aae2006e
AA
875/*
876 * Restart an (async) RPC call from the call_prepare state.
877 * Usually called from within the exit handler.
878 */
f1f88fc7 879int
aae2006e
AA
880rpc_restart_call_prepare(struct rpc_task *task)
881{
882 if (RPC_ASSASSINATED(task))
f1f88fc7 883 return 0;
d00c5d43
TM
884 task->tk_action = call_start;
885 if (task->tk_ops->rpc_call_prepare != NULL)
886 task->tk_action = rpc_prepare_task;
f1f88fc7 887 return 1;
aae2006e
AA
888}
889EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
890
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 *
 * Returns 0 if the task was assassinated (no restart), 1 otherwise.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return 0;
	task->tk_action = call_start;
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);
1da177e4 904
#ifdef RPC_DEBUG
/*
 * Return a printable name for the task's RPC procedure, for debug
 * output only.  Never returns NULL.
 */
static const char *rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc == NULL)
		return "no proc";
	return proc->p_name ? proc->p_name : "NULL";
}
#endif
919
/*
 * 0.	Initial state
 *
 *	Other FSM states can be visited zero or more times, but
 *	this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
			clnt->cl_protname, clnt->cl_vers,
			rpc_proc_name(task),
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}
941
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}
954
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_refresh;
			return;
		}

		/* success status without a slot: internal inconsistency */
		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__func__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__func__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__func__, status);
		break;
	}
	rpc_exit(task, status);
}
1005
/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}
1019
/*
 * 2a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_refresh;	/* default: try the refresh again */
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task))
			task->tk_action = call_allocate;
		return;
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		/* fall through — retry after the delay, like -EAGAIN */
	case -EAGAIN:
		status = -EACCES;
		if (!task->tk_cred_retry)
			break;	/* retries exhausted: fail with -EACCES */
		task->tk_cred_retry--;
		dprintk("RPC: %5u %s: retry refresh creds\n",
				task->tk_pid, __func__);
		return;
	}
	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
			task->tk_pid, __func__, status);
	rpc_exit(task, status);
}
1052
/*
 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
 * (Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_bind;

	/* A buffer may survive from a previous attempt (retransmit). */
	if (req->rq_buffer)
		return;

	/* Sanity check the procedure table entry: every non-NULL
	 * procedure must declare its argument size, and a reply size
	 * whenever it has a decoder. */
	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
	req->rq_callsize <<= 2;
	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
	req->rq_rcvsize <<= 2;

	/* Send and receive buffers share one allocation; the receive
	 * half starts rq_callsize bytes in (see rpc_xdr_encode). */
	req->rq_buffer = xprt->ops->buf_alloc(task,
					req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)
		return;

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	/* Allocation failed: back off briefly and retry, unless a
	 * synchronous caller has a fatal signal pending. */
	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
1104
940e3318
TM
1105static inline int
1106rpc_task_need_encode(struct rpc_task *task)
1107{
1108 return task->tk_rqstp->rq_snd_buf.len == 0;
1109}
1110
1111static inline void
1112rpc_task_force_reencode(struct rpc_task *task)
1113{
1114 task->tk_rqstp->rq_snd_buf.len = 0;
2574cc9f 1115 task->tk_rqstp->rq_bytes_sent = 0;
940e3318
TM
1116}
1117
2bea90d4
CL
1118static inline void
1119rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
1120{
1121 buf->head[0].iov_base = start;
1122 buf->head[0].iov_len = len;
1123 buf->tail[0].iov_len = 0;
1124 buf->page_len = 0;
4f22ccc3 1125 buf->flags = 0;
2bea90d4
CL
1126 buf->len = 0;
1127 buf->buflen = len;
1128}
1129
1da177e4
LT
/*
 * 3. Encode arguments of an RPC call
 */
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	kxdreproc_t	encode;
	__be32		*p;

	dprint_status(task);

	/* Split the single allocation made in call_allocate() into the
	 * send buffer (first rq_callsize bytes) and the receive buffer
	 * (the remaining rq_rcvsize bytes). */
	rpc_xdr_buf_init(&req->rq_snd_buf,
			 req->rq_buffer,
			 req->rq_callsize);
	rpc_xdr_buf_init(&req->rq_rcv_buf,
			 (char *)req->rq_buffer + req->rq_callsize,
			 req->rq_rcvsize);

	p = rpc_encode_header(task);
	if (p == NULL) {
		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}

	encode = task->tk_msg.rpc_proc->p_encode;
	/* Procedures with no arguments (e.g. NULL ping) have no encoder. */
	if (encode == NULL)
		return;

	/* rpcauth_wrap_req() invokes the encoder, allowing RPCSEC_GSS
	 * to wrap the arguments for privacy/integrity. */
	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
}
1163
1164/*
1165 * 4. Get the server port number if not yet set
1166 */
1167static void
1168call_bind(struct rpc_task *task)
1169{
ec739ef0 1170 struct rpc_xprt *xprt = task->tk_xprt;
1da177e4 1171
46121cf7 1172 dprint_status(task);
1da177e4 1173
da351878 1174 task->tk_action = call_connect;
ec739ef0 1175 if (!xprt_bound(xprt)) {
da351878 1176 task->tk_action = call_bind_status;
ec739ef0 1177 task->tk_timeout = xprt->bind_timeout;
bbf7c1dd 1178 xprt->ops->rpcbind(task);
1da177e4
LT
1179 }
1180}
1181
/*
 * 4a. Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EIO;

	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		/* Program may appear later; retry a bounded number of times. */
		if (task->tk_rebind_retry == 0)
			break;
		task->tk_rebind_retry--;
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		/* Retry immediately: rpcbind will try another version. */
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
				task->tk_pid, task->tk_status);
		/* Soft-connect tasks fail fast; others back off and retry. */
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	rpc_exit(task, status);
	return;

retry_timeout:
	/* Let the regular timeout machinery decide whether to retry. */
	task->tk_action = call_timeout;
}
1256
1257/*
1258 * 4b. Connect to the RPC server
1da177e4
LT
1259 */
1260static void
1261call_connect(struct rpc_task *task)
1262{
da351878 1263 struct rpc_xprt *xprt = task->tk_xprt;
1da177e4 1264
46121cf7 1265 dprintk("RPC: %5u call_connect xprt %p %s connected\n",
da351878
CL
1266 task->tk_pid, xprt,
1267 (xprt_connected(xprt) ? "is" : "is not"));
1da177e4 1268
da351878
CL
1269 task->tk_action = call_transmit;
1270 if (!xprt_connected(xprt)) {
1271 task->tk_action = call_connect_status;
1272 if (task->tk_status < 0)
1273 return;
1274 xprt_connect(task);
1da177e4 1275 }
1da177e4
LT
1276}
1277
/*
 * 4c. Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	/* -EAGAIN means the transport wants us to retry the transmit,
	 * which will re-attempt the connect as needed. */
	if (status >= 0 || status == -EAGAIN) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	switch (status) {
		/* if soft mounted, test if we've timed out */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	default:
		/* Any other connect failure is fatal for this call. */
		rpc_exit(task, -EIO);
	}
}
1305
/*
 * 5. Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	/* Claim the transport's write lock (may queue us and return
	 * non-zero, in which case we come back here later). */
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		rpc_xdr_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Was the error nonfatal? */
			if (task->tk_status == -EAGAIN)
				rpc_delay(task, HZ >> 4);
			else
				rpc_exit(task, task->tk_status);
			return;
		}
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (rpc_reply_expected(task))
		return;
	/* One-way message (no reply expected): we are done once sent. */
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}
1348
/*
 * 5a. Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success.  Force the compiler to put this
	 * test first.
	 */
	if (task->tk_status == 0) {
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		/* Waiting for socket write space: keep the transport
		 * lock so we can resume the partial send. */
		break;
	default:
		dprint_status(task);
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);
		break;
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		if (RPC_IS_SOFTCONN(task)) {
			xprt_end_transmit(task);
			rpc_exit(task, task->tk_status);
			break;
		}
		/* fall through: non-SOFTCONN tasks re-encode and retry */
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		rpc_task_force_reencode(task);
	}
}
1396
9e00abc3 1397#if defined(CONFIG_SUNRPC_BACKCHANNEL)
55ae1aab
RL
/*
 * 5b. Send the backchannel RPC reply. On error, drop the reply. In
 * addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	BUG_ON(task->tk_status != 0);
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status == -EAGAIN) {
		/*
		 * Could not reserve the transport. Try again after the
		 * transport is released.
		 */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	}

	task->tk_action = rpc_exit_task;
	if (task->tk_status < 0) {
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		return;
	}

	xprt_transmit(task);
	xprt_end_transmit(task);
	dprint_status(task);
	switch (task->tk_status) {
	case 0:
		/* Success */
		break;
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server.  Disconnect and let the
		 * forechannel reestablish the connection.  The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it.  Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(task->tk_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request.  The server should reconnect and retransmit.
		 */
		BUG_ON(task->tk_status == -EAGAIN);
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
9e00abc3 1461#endif /* CONFIG_SUNRPC_BACKCHANNEL */
55ae1aab 1462
1da177e4
LT
/*
 * 6. Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	/* If a reply already arrived while we were still transmitting,
	 * report the number of reply bytes as a success status. */
	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_reply_bytes_recvd;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
		rpc_force_rebind(clnt);
		rpc_delay(task, 3*HZ);
		/* fall through: rebind and retry like a broken pipe */
	case -EPIPE:
	case -ENOTCONN:
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
1522
/*
 * 6a. Handle RPC timeout
 * 	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	/* A "minor" timeout just increases the per-request timeout and
	 * retries; a "major" timeout has exhausted that adjustment. */
	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFTCONN(task)) {
		rpc_exit(task, -ETIMEDOUT);
		return;
	}
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_exit(task, -ETIMEDOUT);
		else
			rpc_exit(task, -EIO);
		return;
	}

	/* Hard mount: log the first major timeout, then keep retrying. */
	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
1574
/*
 * 7. Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	__be32		*p;

	dprint_status(task);

	/* Announce recovery after a previously-logged major timeout. */
	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* A reply shorter than 12 bytes cannot even hold the minimal
	 * RPC reply header (XID + msg type + reply stat). */
	if (req->rq_rcv_buf.len < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_protname, task->tk_status);
		task->tk_action = call_timeout;
		goto out_retry;
	}

	p = rpc_verify_header(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode) {
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	}
	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	task->tk_status = 0;
	/* Note: rpc_verify_header() may have freed the RPC slot */
	if (task->tk_rqstp == req) {
		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(task->tk_xprt,
					req->rq_connect_cookie);
	}
}
1644
d8ed029d 1645static __be32 *
b0e1c57e 1646rpc_encode_header(struct rpc_task *task)
1da177e4
LT
1647{
1648 struct rpc_clnt *clnt = task->tk_client;
1da177e4 1649 struct rpc_rqst *req = task->tk_rqstp;
d8ed029d 1650 __be32 *p = req->rq_svec[0].iov_base;
1da177e4
LT
1651
1652 /* FIXME: check buffer size? */
808012fb
CL
1653
1654 p = xprt_skip_transport_header(task->tk_xprt, p);
1da177e4
LT
1655 *p++ = req->rq_xid; /* XID */
1656 *p++ = htonl(RPC_CALL); /* CALL */
1657 *p++ = htonl(RPC_VERSION); /* RPC version */
1658 *p++ = htonl(clnt->cl_prog); /* program number */
1659 *p++ = htonl(clnt->cl_vers); /* program version */
1660 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
334ccfd5
TM
1661 p = rpcauth_marshcred(task, p);
1662 req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
1663 return p;
1da177e4
LT
1664}
1665
d8ed029d 1666static __be32 *
b0e1c57e 1667rpc_verify_header(struct rpc_task *task)
1da177e4
LT
1668{
1669 struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
1670 int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
d8ed029d
AD
1671 __be32 *p = iov->iov_base;
1672 u32 n;
1da177e4
LT
1673 int error = -EACCES;
1674
e8896495
DH
1675 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1676 /* RFC-1014 says that the representation of XDR data must be a
1677 * multiple of four bytes
1678 * - if it isn't pointer subtraction in the NFS client may give
1679 * undefined results
1680 */
8a702bbb 1681 dprintk("RPC: %5u %s: XDR representation not a multiple of"
0dc47877 1682 " 4 bytes: 0x%x\n", task->tk_pid, __func__,
8a702bbb 1683 task->tk_rqstp->rq_rcv_buf.len);
e8896495
DH
1684 goto out_eio;
1685 }
1da177e4
LT
1686 if ((len -= 3) < 0)
1687 goto out_overflow;
1da177e4 1688
f4a2e418 1689 p += 1; /* skip XID */
1da177e4 1690 if ((n = ntohl(*p++)) != RPC_REPLY) {
8a702bbb 1691 dprintk("RPC: %5u %s: not an RPC reply: %x\n",
f4a2e418 1692 task->tk_pid, __func__, n);
abbcf28f 1693 goto out_garbage;
1da177e4 1694 }
f4a2e418 1695
1da177e4
LT
1696 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
1697 if (--len < 0)
1698 goto out_overflow;
1699 switch ((n = ntohl(*p++))) {
89f0e4fe
JP
1700 case RPC_AUTH_ERROR:
1701 break;
1702 case RPC_MISMATCH:
1703 dprintk("RPC: %5u %s: RPC call version mismatch!\n",
1704 task->tk_pid, __func__);
1705 error = -EPROTONOSUPPORT;
1706 goto out_err;
1707 default:
1708 dprintk("RPC: %5u %s: RPC call rejected, "
1709 "unknown error: %x\n",
1710 task->tk_pid, __func__, n);
1711 goto out_eio;
1da177e4
LT
1712 }
1713 if (--len < 0)
1714 goto out_overflow;
1715 switch ((n = ntohl(*p++))) {
1716 case RPC_AUTH_REJECTEDCRED:
1717 case RPC_AUTH_REJECTEDVERF:
1718 case RPCSEC_GSS_CREDPROBLEM:
1719 case RPCSEC_GSS_CTXPROBLEM:
1720 if (!task->tk_cred_retry)
1721 break;
1722 task->tk_cred_retry--;
46121cf7 1723 dprintk("RPC: %5u %s: retry stale creds\n",
0dc47877 1724 task->tk_pid, __func__);
1da177e4 1725 rpcauth_invalcred(task);
220bcc2a
TM
1726 /* Ensure we obtain a new XID! */
1727 xprt_release(task);
118df3d1 1728 task->tk_action = call_reserve;
abbcf28f 1729 goto out_retry;
1da177e4
LT
1730 case RPC_AUTH_BADCRED:
1731 case RPC_AUTH_BADVERF:
1732 /* possibly garbled cred/verf? */
1733 if (!task->tk_garb_retry)
1734 break;
1735 task->tk_garb_retry--;
46121cf7 1736 dprintk("RPC: %5u %s: retry garbled creds\n",
0dc47877 1737 task->tk_pid, __func__);
1da177e4 1738 task->tk_action = call_bind;
abbcf28f 1739 goto out_retry;
1da177e4 1740 case RPC_AUTH_TOOWEAK:
b0e1c57e 1741 printk(KERN_NOTICE "RPC: server %s requires stronger "
1356b8c2 1742 "authentication.\n", task->tk_client->cl_server);
1da177e4
LT
1743 break;
1744 default:
8a702bbb 1745 dprintk("RPC: %5u %s: unknown auth error: %x\n",
0dc47877 1746 task->tk_pid, __func__, n);
1da177e4
LT
1747 error = -EIO;
1748 }
46121cf7 1749 dprintk("RPC: %5u %s: call rejected %d\n",
0dc47877 1750 task->tk_pid, __func__, n);
1da177e4
LT
1751 goto out_err;
1752 }
1753 if (!(p = rpcauth_checkverf(task, p))) {
8a702bbb 1754 dprintk("RPC: %5u %s: auth check failed\n",
0dc47877 1755 task->tk_pid, __func__);
abbcf28f 1756 goto out_garbage; /* bad verifier, retry */
1da177e4 1757 }
d8ed029d 1758 len = p - (__be32 *)iov->iov_base - 1;
1da177e4
LT
1759 if (len < 0)
1760 goto out_overflow;
1761 switch ((n = ntohl(*p++))) {
1762 case RPC_SUCCESS:
1763 return p;
1764 case RPC_PROG_UNAVAIL:
46121cf7 1765 dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
0dc47877 1766 task->tk_pid, __func__,
1da177e4
LT
1767 (unsigned int)task->tk_client->cl_prog,
1768 task->tk_client->cl_server);
cdf47706
AG
1769 error = -EPFNOSUPPORT;
1770 goto out_err;
1da177e4 1771 case RPC_PROG_MISMATCH:
46121cf7 1772 dprintk("RPC: %5u %s: program %u, version %u unsupported by "
0dc47877 1773 "server %s\n", task->tk_pid, __func__,
1da177e4
LT
1774 (unsigned int)task->tk_client->cl_prog,
1775 (unsigned int)task->tk_client->cl_vers,
1776 task->tk_client->cl_server);
cdf47706
AG
1777 error = -EPROTONOSUPPORT;
1778 goto out_err;
1da177e4 1779 case RPC_PROC_UNAVAIL:
3748f1e4 1780 dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
46121cf7 1781 "version %u on server %s\n",
0dc47877 1782 task->tk_pid, __func__,
3748f1e4 1783 rpc_proc_name(task),
1da177e4
LT
1784 task->tk_client->cl_prog,
1785 task->tk_client->cl_vers,
1786 task->tk_client->cl_server);
cdf47706
AG
1787 error = -EOPNOTSUPP;
1788 goto out_err;
1da177e4 1789 case RPC_GARBAGE_ARGS:
46121cf7 1790 dprintk("RPC: %5u %s: server saw garbage\n",
0dc47877 1791 task->tk_pid, __func__);
1da177e4
LT
1792 break; /* retry */
1793 default:
8a702bbb 1794 dprintk("RPC: %5u %s: server accept status: %x\n",
0dc47877 1795 task->tk_pid, __func__, n);
1da177e4
LT
1796 /* Also retry */
1797 }
1798
abbcf28f 1799out_garbage:
1da177e4
LT
1800 task->tk_client->cl_stats->rpcgarbage++;
1801 if (task->tk_garb_retry) {
1802 task->tk_garb_retry--;
46121cf7 1803 dprintk("RPC: %5u %s: retrying\n",
0dc47877 1804 task->tk_pid, __func__);
1da177e4 1805 task->tk_action = call_bind;
abbcf28f
TM
1806out_retry:
1807 return ERR_PTR(-EAGAIN);
1da177e4 1808 }
1da177e4
LT
1809out_eio:
1810 error = -EIO;
1811out_err:
1812 rpc_exit(task, error);
8a702bbb 1813 dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
0dc47877 1814 __func__, error);
abbcf28f 1815 return ERR_PTR(error);
1da177e4 1816out_overflow:
8a702bbb 1817 dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
0dc47877 1818 __func__);
abbcf28f 1819 goto out_garbage;
1da177e4 1820}
5ee0ed7d 1821
9f06c719 1822static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
5ee0ed7d 1823{
5ee0ed7d
TM
1824}
1825
bf269551 1826static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
5ee0ed7d
TM
1827{
1828 return 0;
1829}
1830
/* Procedure 0 (NULL): used by rpc_ping() and rpc_call_null(). */
static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};
1835
caabea8a 1836static int rpc_ping(struct rpc_clnt *clnt)
5ee0ed7d
TM
1837{
1838 struct rpc_message msg = {
1839 .rpc_proc = &rpcproc_null,
1840 };
1841 int err;
1842 msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
caabea8a 1843 err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
5ee0ed7d
TM
1844 put_rpccred(msg.rpc_cred);
1845 return err;
1846}
188fef11 1847
5e1550d6
TM
/*
 * Start an asynchronous NULL-procedure call on @clnt using @cred.
 * Callers use this for connection probing and credential testing.
 * Returns the running rpc_task (caller must rpc_put_task()) or an
 * ERR_PTR on task-creation failure.
 */
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	return rpc_run_task(&task_setup_data);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
5e1550d6 1863
188fef11 1864#ifdef RPC_DEBUG
68a23ee9
CL
/* Print the column legend for the rpc_show_task() lines below. */
static void rpc_show_header(void)
{
	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
		"-timeout ---ops--\n");
}
1870
38e886e0
CL
/* Print one debug line describing @task; caller holds clnt->cl_lock. */
static void rpc_show_task(const struct rpc_clnt *clnt,
			  const struct rpc_task *task)
{
	const char *rpc_waitq = "none";

	if (RPC_IS_QUEUED(task))
		rpc_waitq = rpc_qname(task->tk_waitqueue);

	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
		task->tk_pid, task->tk_flags, task->tk_status,
		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
		task->tk_action, rpc_waitq);
}
1885
188fef11
TM
/*
 * Dump every task of every RPC client to the kernel log (debug aid,
 * RPC_DEBUG builds only).  Lock order: rpc_client_lock, then each
 * client's cl_lock nested inside it.
 */
void rpc_show_tasks(void)
{
	struct rpc_clnt *clnt;
	struct rpc_task *task;
	int header = 0;

	spin_lock(&rpc_client_lock);
	list_for_each_entry(clnt, &all_clients, cl_clients) {
		spin_lock(&clnt->cl_lock);
		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
			/* Emit the legend once, and only if any task exists. */
			if (!header) {
				rpc_show_header();
				header++;
			}
			rpc_show_task(clnt, task);
		}
		spin_unlock(&clnt->cl_lock);
	}
	spin_unlock(&rpc_client_lock);
}
1906#endif