/*
 * linux/net/sunrpc/rpcclnt.c
 *
 * This file contains the high-level RPC interface.
 * It is modeled as a finite state machine to support both synchronous
 * and asynchronous requests.
 *
 * - RPC header generation and argument serialization.
 * - Credential refresh.
 * - TCP connect handling.
 * - Retry of operation when it is suspected the operation failed because
 *   of uid squashing on the server, or when the credentials were stale
 *   and need to be refreshed, or when a packet was damaged in transit.
 *   This may have to be moved to the VFS layer.
 *
 * NB: BSD uses a more intelligent approach to guessing when a request
 * or reply has been lost by keeping the RTO estimate for each procedure.
 * We currently make do with a constant timeout value.
 *
 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>


#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);

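/*
 * Added overview: summary of the call FSM implemented by the handlers
 * declared above (retry and error paths abbreviated).
 *
 *	call_start -> call_reserve -> call_reserveresult -> call_allocate
 *	  -> call_bind [-> call_connect -> call_connect_status]
 *	  -> call_transmit -> call_status -> call_decode
 *
 * call_status dispatches back to call_timeout, call_bind, or
 * call_transmit on errors; call_reserve detours through call_refresh
 * and call_refreshresult when credentials need refreshing.
 */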

static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
{
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_err;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap = &clnt->cl_pmap_default;
	clnt->cl_port = xprt->addr.sin_port;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_prot = xprt->prot;
	clnt->cl_stats = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	err = -ENOMEM;
	if (!rpcauth_create(flavor, clnt)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
	return ERR_PTR(err);
}

/**
 * rpc_create_client - Create an RPC client
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}

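/*
 * Example usage (an illustrative sketch, not code from this file): a
 * caller typically builds a transport first, then creates the client.
 * The my_program and server_addr names are placeholders, and
 * xprt_create_proto() is assumed to be the transport constructor of
 * this era.  Note that on failure the client constructor destroys the
 * transport itself, so the caller must not free xprt again.
 *
 *	struct rpc_xprt *xprt;
 *	struct rpc_clnt *clnt;
 *
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, NULL);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *	clnt = rpc_create_client(xprt, "myserver", &my_program,
 *				 MY_VERSION, RPC_AUTH_UNIX);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */
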
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}

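/*
 * Example (sketch only): sharing one transport under a different auth
 * flavour, as the comment above describes.  Error handling is elided
 * and RPC_AUTH_NULL stands in for whatever flavour the caller needs;
 * whether the clone's old auth handle needs extra cleanup is left to
 * the caller.
 *
 *	new = rpc_clone_client(clnt);
 *	if (!IS_ERR(new))
 *		rpcauth_create(RPC_AUTH_NULL, new);
 */
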
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		sleep_on_timeout(&destroy_wait, 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}

/*
 * Export the signal mask handling for asynchronous code that
 * sleeps on RPC calls
 */

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long sigallow = sigmask(SIGKILL);
	unsigned long irqflags;

	/* Turn off various signals */
	if (clnt->cl_intr) {
		struct k_sigaction *action = current->sighand->action;
		if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGINT);
		if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
			sigallow |= sigmask(SIGQUIT);
	}
	spin_lock_irqsave(&current->sighand->siglock, irqflags);
	*oldset = current->blocked;
	siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	unsigned long irqflags;

	spin_lock_irqsave(&current->sighand->siglock, irqflags);
	current->blocked = *oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
}

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	sigset_t oldset;
	int status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	rpc_clnt_sigmask(clnt, &oldset);

	status = -ENOMEM;
	task = rpc_new_task(clnt, NULL, flags);
	if (task == NULL)
		goto out;

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	if (task->tk_status == 0)
		status = rpc_execute(task);
	else {
		status = task->tk_status;
		rpc_release_task(task);
	}

out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}

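/*
 * Example (illustrative sketch): a synchronous NULL-procedure call,
 * mirroring what rpc_ping() at the bottom of this file does.  In real
 * callers the procedure entry and argument/result pointers come from
 * the caller's own rpc_program.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &rpcproc_null,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 */
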
/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       rpc_action callback, void *data)
{
	struct rpc_task *task;
	sigset_t oldset;
	int status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	rpc_clnt_sigmask(clnt, &oldset);

	/* Create/initialize a new RPC task */
	if (!callback)
		callback = rpc_default_callback;
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, callback, flags)))
		goto out;
	task->tk_calldata = data;

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

out:
	rpc_clnt_sigunmask(clnt, &oldset);

	return status;
}

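/*
 * Example (sketch): an asynchronous call with a completion callback.
 * The callback runs when the task completes, with the final status in
 * task->tk_status and the caller's cookie in task->tk_calldata.  The
 * my_done and my_data names are placeholders for illustration.
 *
 *	static void my_done(struct rpc_task *task)
 *	{
 *		struct my_data *d = task->tk_calldata;
 *		if (task->tk_status < 0)
 *			handle_error(d, task->tk_status);
 *	}
 *
 *	rpc_call_async(clnt, &msg, RPC_TASK_SOFT, my_done, my_data);
 */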

void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = NULL;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;

	xprt->sndsize = 0;
	if (sndsize)
		xprt->sndsize = sndsize + RPC_SLACK_SPACE;
	xprt->rcvsize = 0;
	if (rcvsize)
		xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
	if (xprt_connected(xprt))
		xprt_sock_setbufsize(xprt);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}

/*
 * 0. Initial state
 *
 *    Other FSM states can be visited zero or more times, but
 *    this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1. Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b. Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
			task->tk_pid, task->tk_status);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
 *    (Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
			task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (rpc_malloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/*
 * 3. Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int bufsiz;
	kxdrproc_t encode;
	int status;
	u32 *p;

	dprintk("RPC: %4d call_encode (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = task->tk_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)task->tk_buffer;
	sndbuf->head[0].iov_len = bufsiz;
	sndbuf->tail[0].iov_len = 0;
	sndbuf->page_len = 0;
	sndbuf->len = 0;
	sndbuf->buflen = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
	rcvbuf->head[0].iov_len = bufsiz;
	rcvbuf->tail[0].iov_len = 0;
	rcvbuf->page_len = 0;
	rcvbuf->len = 0;
	rcvbuf->buflen = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
						 task->tk_msg.rpc_argp)) < 0) {
		printk(KERN_WARNING "%s: can't encode arguments: %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 4. Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_xprt *xprt = clnt->cl_xprt;

	dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect;

	if (!clnt->cl_port) {
		task->tk_action = call_connect;
		task->tk_timeout = RPC_CONNECT_TIMEOUT;
		rpc_getport(task, clnt);
	}
}

/*
 * 4a. Connect to the RPC server (TCP case)
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %4d call_connect status %d\n",
			task->tk_pid, task->tk_status);

	if (xprt_connected(clnt->cl_xprt)) {
		task->tk_action = call_transmit;
		return;
	}
	task->tk_action = call_connect_status;
	if (task->tk_status < 0)
		return;
	xprt_connect(task);
}

/*
 * 4b. Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: we may have to rebind */
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
		break;
	default:
		rpc_exit(task, -EIO);
	}
}

/*
 * 5. Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (!task->tk_rqstp->rq_bytes_sent)
		call_encode(task);
	if (task->tk_status < 0)
		return;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = NULL;
		rpc_wake_up_task(task);
	}
}

/*
 * 6. Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	int status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
			task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch (status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		req->rq_bytes_sent = 0;
		if (clnt->cl_autobind)
			clnt->cl_port = 0;
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
					clnt->cl_protname, -status);
		rpc_exit(task, status);
		break;
	}
}

/*
 * 6a. Handle RPC timeout
 *     We do not release the request slot, so we keep using the
 *     same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty)
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	if (clnt->cl_autobind)
		clnt->cl_port = 0;

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

/*
 * 7. Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
	u32 *p;

	dprintk("RPC: %4d call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	if (!(p = call_verify(task))) {
		if (task->tk_action == NULL)
			return;
		goto out_retry;
	}

	task->tk_action = NULL;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}

/*
 * 8. Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a. Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
			task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_xprt *xprt = clnt->cl_xprt;
	struct rpc_rqst *req = task->tk_rqstp;
	u32 *p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */
	if (xprt->stream)
		*p++ = 0;				/* fill in later */
	*p++ = req->rq_xid;				/* XID */
	*p++ = htonl(RPC_CALL);				/* CALL */
	*p++ = htonl(RPC_VERSION);			/* RPC version */
	*p++ = htonl(clnt->cl_prog);			/* program number */
	*p++ = htonl(clnt->cl_vers);			/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}

/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32 *p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_retry;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			printk(KERN_WARNING "%s: RPC call version mismatch!\n", __FUNCTION__);
			goto out_eio;
		default:
			printk(KERN_WARNING "%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
					task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			return NULL;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
					task->tk_pid);
			task->tk_action = call_bind;
			return NULL;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
				task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_retry;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		printk(KERN_WARNING "RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		goto out_eio;
	case RPC_PROG_MISMATCH:
		printk(KERN_WARNING "RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		goto out_eio;
	case RPC_PROC_UNAVAIL:
		printk(KERN_WARNING "RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		goto out_eio;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_retry:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk(KERN_WARNING "RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
		return NULL;
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return NULL;
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_retry;
}

static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}