/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);

#define NFSPROC4_CB_NULL	0
#define NFSPROC4_CB_COMPOUND	1

/* Index of predefined Linux callback client operations */

enum {
	NFSPROC4_CLNT_CB_NULL = 0,
	NFSPROC4_CLNT_CB_RECALL,
	NFSPROC4_CLNT_CB_SEQUENCE,
};

#define NFS4_MAXTAGLEN		20

#define NFS4_enc_cb_null_sz		0
#define NFS4_dec_cb_null_sz		0
#define cb_compound_enc_hdr_sz		4
#define cb_compound_dec_hdr_sz		(3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz			(NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz		(sessionid_sz + 4 +		\
					1 /* no referring calls list yet */)
#define cb_sequence_dec_sz		(op_dec_sz + sessionid_sz + 4)

#define op_enc_sz			1
#define op_dec_sz			2
#define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz			(NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +	\
					cb_sequence_enc_sz +		\
					1 + enc_stateid_sz +		\
					enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz		(cb_compound_dec_hdr_sz +	\
					cb_sequence_dec_sz +		\
					op_dec_sz)

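/*
 * The *_sz values above are counts of 32-bit XDR words, not bytes.  As a
 * rough worked example (assuming the usual values NFS4_FHSIZE = 128,
 * NFS4_STATEID_SIZE = 16 and NFS4_MAX_SESSIONID_LEN = 16):
 *
 *	NFS4_enc_cb_recall_sz = 4 + (4 + 4 + 1) + 1 + 4 + (1 + 32)
 *			      = 51 words, i.e. 204 bytes of argument space.
 */
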
struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
};

/*
 * Handle decode buffer overflows out-of-line.
 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
{
	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
		"Remaining buffer length is %tu words.\n",
		func, xdr->end - xdr->p);
}

static __be32 *xdr_encode_empty_array(__be32 *p)
{
	*p++ = xdr_zero;
	return p;
}

/*
 * Encode/decode NFSv4 CB basic data types
 *
 * Basic NFSv4 callback data types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section
 * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
 * 1 Protocol"
 */

/*
 * nfs_cb_opnum4
 *
 *	enum nfs_cb_opnum4 {
 *		OP_CB_GETATTR	= 3,
 *		  ...
 *	};
 */
enum nfs_cb_opnum4 {
	OP_CB_GETATTR			= 3,
	OP_CB_RECALL			= 4,
	OP_CB_LAYOUTRECALL		= 5,
	OP_CB_NOTIFY			= 6,
	OP_CB_PUSH_DELEG		= 7,
	OP_CB_RECALL_ANY		= 8,
	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
	OP_CB_RECALL_SLOT		= 10,
	OP_CB_SEQUENCE			= 11,
	OP_CB_WANTS_CANCELLED		= 12,
	OP_CB_NOTIFY_LOCK		= 13,
	OP_CB_NOTIFY_DEVICEID		= 14,
	OP_CB_ILLEGAL			= 10044
};

static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(op);
}

/*
 * nfs_fh4
 *
 *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
 */
static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
{
	u32 length = fh->fh_size;
	__be32 *p;

	BUG_ON(length > NFS4_FHSIZE);
	p = xdr_reserve_space(xdr, 4 + length);
	xdr_encode_opaque(p, &fh->fh_base, length);
}

/*
 * stateid4
 *
 *	struct stateid4 {
 *		uint32_t	seqid;
 *		opaque		other[12];
 *	};
 */
static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
	*p++ = cpu_to_be32(sid->si_generation);
	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
}

/*
 * sessionid4
 *
 *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
 */
static void encode_sessionid4(struct xdr_stream *xdr,
			      const struct nfsd4_session *session)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
					NFS4_MAX_SESSIONID_LEN);
}

/*
 * nfsstat4
 */
static const struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK,		0		},
	{ NFS4ERR_PERM,		-EPERM		},
	{ NFS4ERR_NOENT,	-ENOENT		},
	{ NFS4ERR_IO,		-EIO		},
	{ NFS4ERR_NXIO,		-ENXIO		},
	{ NFS4ERR_ACCESS,	-EACCES		},
	{ NFS4ERR_EXIST,	-EEXIST		},
	{ NFS4ERR_XDEV,		-EXDEV		},
	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
	{ NFS4ERR_ISDIR,	-EISDIR		},
	{ NFS4ERR_INVAL,	-EINVAL		},
	{ NFS4ERR_FBIG,		-EFBIG		},
	{ NFS4ERR_NOSPC,	-ENOSPC		},
	{ NFS4ERR_ROFS,		-EROFS		},
	{ NFS4ERR_MLINK,	-EMLINK		},
	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
	{ NFS4ERR_DQUOT,	-EDQUOT		},
	{ NFS4ERR_STALE,	-ESTALE		},
	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
	{ NFS4ERR_LOCKED,	-EAGAIN		},
	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
	{ NFS4ERR_SYMLINK,	-ELOOP		},
	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
	{ -1,			-EIO		}
};

/*
 * If we cannot translate the error, the recovery routines should
 * handle it.
 *
 * Note: remaining NFSv4 error codes have values > 10000, so should
 * not conflict with native Linux error codes.
 */
static int nfs_cb_stat_to_errno(int status)
{
	int i;

	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == status)
			return nfs_cb_errtbl[i].errno;
	}

	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
	return -status;
}

static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
			       enum nfsstat4 *status)
{
	__be32 *p;
	u32 op;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	op = be32_to_cpup(p++);
	if (unlikely(op != expected))
		goto out_unexpected;
	*status = be32_to_cpup(p);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
out_unexpected:
	dprintk("NFSD: Callback server returned operation %d but "
		"we issued a request for %d\n", op, expected);
	return -EIO;
}

/*
 * CB_COMPOUND4args
 *
 *	struct CB_COMPOUND4args {
 *		utf8str_cs	tag;
 *		uint32_t	minorversion;
 *		uint32_t	callback_ident;
 *		nfs_cb_argop4	argarray<>;
 *	};
 */
static void encode_cb_compound4args(struct xdr_stream *xdr,
				    struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
	p = xdr_encode_empty_array(p);		/* empty tag */
	*p++ = cpu_to_be32(hdr->minorversion);
	*p++ = cpu_to_be32(hdr->ident);

	hdr->nops_p = p;
	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
}

/*
 * Update argarray element count
 */
static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
	*hdr->nops_p = cpu_to_be32(hdr->nops);
}

/*
 * CB_COMPOUND4res
 *
 *	struct CB_COMPOUND4res {
 *		nfsstat4	status;
 *		utf8str_cs	tag;
 *		nfs_cb_resop4	resarray<>;
 *	};
 */
static int decode_cb_compound4res(struct xdr_stream *xdr,
				  struct nfs4_cb_compound_hdr *hdr)
{
	u32 length;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	hdr->status = be32_to_cpup(p++);
	/* Ignore the tag */
	length = be32_to_cpup(p++);
	p = xdr_inline_decode(xdr, length + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	hdr->nops = be32_to_cpup(p);
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

/*
 * CB_RECALL4args
 *
 *	struct CB_RECALL4args {
 *		stateid4	stateid;
 *		bool		truncate;
 *		nfs_fh4		fh;
 *	};
 */
static void encode_cb_recall4args(struct xdr_stream *xdr,
				  const struct nfs4_delegation *dp,
				  struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
	encode_stateid4(xdr, &dp->dl_stid.sc_stateid);

	p = xdr_reserve_space(xdr, 4);
	*p++ = xdr_zero;			/* truncate */

	encode_nfs_fh4(xdr, &dp->dl_fh);

	hdr->nops++;
}

/*
 * CB_SEQUENCE4args
 *
 *	struct CB_SEQUENCE4args {
 *		sessionid4		csa_sessionid;
 *		sequenceid4		csa_sequenceid;
 *		slotid4			csa_slotid;
 *		slotid4			csa_highest_slotid;
 *		bool			csa_cachethis;
 *		referring_call_list4	csa_referring_call_lists<>;
 *	};
 */
static void encode_cb_sequence4args(struct xdr_stream *xdr,
				    const struct nfsd4_callback *cb,
				    struct nfs4_cb_compound_hdr *hdr)
{
	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
	__be32 *p;

	if (hdr->minorversion == 0)
		return;

	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
	encode_sessionid4(xdr, session);

	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
	*p++ = xdr_zero;			/* csa_slotid */
	*p++ = xdr_zero;			/* csa_highest_slotid */
	*p++ = xdr_zero;			/* csa_cachethis */
	xdr_encode_empty_array(p);		/* csa_referring_call_lists */

	hdr->nops++;
}

/*
 * CB_SEQUENCE4resok
 *
 *	struct CB_SEQUENCE4resok {
 *		sessionid4	csr_sessionid;
 *		sequenceid4	csr_sequenceid;
 *		slotid4		csr_slotid;
 *		slotid4		csr_highest_slotid;
 *		slotid4		csr_target_highest_slotid;
 *	};
 *
 *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
 *	case NFS4_OK:
 *		CB_SEQUENCE4resok	csr_resok4;
 *	default:
 *		void;
 *	};
 *
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int decode_cb_sequence4resok(struct xdr_stream *xdr,
				    struct nfsd4_callback *cb)
{
	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
	struct nfs4_sessionid id;
	int status;
	__be32 *p;
	u32 dummy;

	status = -ESERVERFAULT;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
	if (memcmp(id.data, session->se_sessionid.data,
					NFS4_MAX_SESSIONID_LEN) != 0) {
		dprintk("NFS: %s Invalid session id\n", __func__);
		goto out;
	}
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);

	dummy = be32_to_cpup(p++);
	if (dummy != session->se_cb_seq_nr) {
		dprintk("NFS: %s Invalid sequence number\n", __func__);
		goto out;
	}

	dummy = be32_to_cpup(p++);
	if (dummy != 0) {
		dprintk("NFS: %s Invalid slotid\n", __func__);
		goto out;
	}

	/*
	 * FIXME: process highest slotid and target highest slotid
	 */
	status = 0;
out:
	if (status)
		nfsd4_mark_cb_fault(cb->cb_clp, status);
	return status;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}

static int decode_cb_sequence4res(struct xdr_stream *xdr,
				  struct nfsd4_callback *cb)
{
	enum nfsstat4 nfserr;
	int status;

	if (cb->cb_minorversion == 0)
		return 0;

	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
	if (unlikely(status))
		goto out;
	if (unlikely(nfserr != NFS4_OK))
		goto out_default;
	status = decode_cb_sequence4resok(xdr, cb);
out:
	return status;
out_default:
	return nfs_cb_stat_to_errno(nfserr);
}

/*
 * NFSv4.0 and NFSv4.1 XDR encode functions
 *
 * NFSv4.0 callback argument types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
 * Protocol".
 */

/*
 * NB: Without this zero space reservation, callbacks over krb5p fail
 */
static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
				 void *__unused)
{
	xdr_reserve_space(xdr, 0);
}

/*
 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
 */
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
				   const struct nfsd4_callback *cb)
{
	const struct nfs4_delegation *args = cb->cb_op;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = cb->cb_clp->cl_cb_ident,
		.minorversion = cb->cb_minorversion,
	};

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);
	encode_cb_recall4args(xdr, args, &hdr);
	encode_cb_nops(&hdr);
}
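
/*
 * Rough sketch of what the encoder above produces: for an NFSv4.1
 * client the CB_COMPOUND body is
 *
 *	tag = "", minorversion = 1, callback_ident, nops = 2,
 *	CB_SEQUENCE { sessionid, se_cb_seq_nr, slotid 0, highest_slotid 0,
 *		      cachethis = false, no referring call lists },
 *	CB_RECALL   { stateid, truncate = false, fh }
 *
 * For an NFSv4.0 client, encode_cb_sequence4args() encodes nothing, so
 * only CB_RECALL goes out and nops ends up as 1.
 */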


/*
 * NFSv4.0 and NFSv4.1 XDR decode functions
 *
 * NFSv4.0 callback result types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
 * Protocol".
 */

static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
				void *__unused)
{
	return 0;
}

/*
 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
 */
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  struct nfsd4_callback *cb)
{
	struct nfs4_cb_compound_hdr hdr;
	enum nfsstat4 nfserr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		goto out;

	if (cb != NULL) {
		status = decode_cb_sequence4res(xdr, cb);
		if (unlikely(status))
			goto out;
	}

	status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
	if (unlikely(status))
		goto out;
	if (unlikely(nfserr != NFS4_OK))
		status = nfs_cb_stat_to_errno(nfserr);
out:
	return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)				\
[NFSPROC4_CLNT_##proc] = {						\
	.p_proc    = NFSPROC4_CB_##call,				\
	.p_encode  = (kxdreproc_t)nfs4_xdr_enc_##argtype,		\
	.p_decode  = (kxdrdproc_t)nfs4_xdr_dec_##restype,		\
	.p_arglen  = NFS4_enc_##argtype##_sz,				\
	.p_replen  = NFS4_dec_##restype##_sz,				\
	.p_statidx = NFSPROC4_CB_##call,				\
	.p_name    = #proc,						\
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
};
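
/*
 * For illustration, the PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall)
 * entry above expands to roughly:
 *
 *	[NFSPROC4_CLNT_CB_RECALL] = {
 *		.p_proc    = NFSPROC4_CB_COMPOUND,
 *		.p_encode  = (kxdreproc_t)nfs4_xdr_enc_cb_recall,
 *		.p_decode  = (kxdrdproc_t)nfs4_xdr_dec_cb_recall,
 *		.p_arglen  = NFS4_enc_cb_recall_sz,
 *		.p_replen  = NFS4_dec_cb_recall_sz,
 *		.p_statidx = NFSPROC4_CB_COMPOUND,
 *		.p_name    = "CB_RECALL",
 *	},
 */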

static struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures
};

static const struct rpc_version *nfs_cb_version[] = {
	&nfs_cb_version4,
};

static const struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program		= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static const struct rpc_program cb_program = {
	.name			= "nfs4_cb",
	.number			= NFS4_CALLBACK,
	.nrvers			= ARRAY_SIZE(nfs_cb_version),
	.version		= nfs_cb_version,
	.stats			= &cb_stats,
	.pipe_dir_name		= "nfsd4_cb",
};

static int max_cb_time(void)
{
	return max(nfsd4_lease/10, (time_t)1) * HZ;
}
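
/*
 * A note on the timeout above: the callback client gets a single attempt
 * (.to_retries is 0 in setup_callback_client() below) with a total
 * timeout of one tenth of the lease period, but at least one second.
 * With the common default lease of 90 seconds, for example, that works
 * out to a 9 second timeout.
 */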

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
	if (callback_cred)
		return 0;
	callback_cred = rpc_lookup_machine_cred("nfs");
	if (!callback_cred)
		return -ENOMEM;
	return 0;
}

static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
{
	if (clp->cl_minorversion == 0) {
		return get_rpccred(callback_cred);
	} else {
		struct rpc_auth *auth = client->cl_auth;
		struct auth_cred acred = {};

		acred.uid = ses->se_cb_sec.uid;
		acred.gid = ses->se_cb_sec.gid;
		return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0);
	}
}

static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
	struct rpc_timeout	timeparms = {
		.to_initval	= max_cb_time(),
		.to_retries	= 0,
	};
	struct rpc_create_args args = {
		.net		= clp->net,
		.address	= (struct sockaddr *) &conn->cb_addr,
		.addrsize	= conn->cb_addrlen,
		.saddress	= (struct sockaddr *) &conn->cb_saddr,
		.timeout	= &timeparms,
		.program	= &cb_program,
		.version	= 0,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
	};
	struct rpc_clnt *client;
	struct rpc_cred *cred;

	if (clp->cl_minorversion == 0) {
		if (!clp->cl_cred.cr_principal &&
				(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
			return -EINVAL;
		args.client_name = clp->cl_cred.cr_principal;
		args.prognumber = conn->cb_prog;
		args.protocol = XPRT_TRANSPORT_TCP;
		args.authflavor = clp->cl_cred.cr_flavor;
		clp->cl_cb_ident = conn->cb_ident;
	} else {
		if (!conn->cb_xprt)
			return -EINVAL;
		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
		clp->cl_cb_session = ses;
		args.bc_xprt = conn->cb_xprt;
		args.prognumber = clp->cl_cb_session->se_cb_prog;
		args.protocol = XPRT_TRANSPORT_BC_TCP;
		args.authflavor = ses->se_cb_sec.flavor;
	}
	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	cred = get_backchannel_cred(clp, client, ses);
	if (IS_ERR(cred)) {
		rpc_shutdown_client(client);
		return PTR_ERR(cred);
	}
	clp->cl_cb_client = client;
	clp->cl_cb_cred = cred;
	return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
	clp->cl_cb_state = NFSD4_CB_DOWN;
	warn_no_callback_path(clp, reason);
}

static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
{
	clp->cl_cb_state = NFSD4_CB_FAULT;
	warn_no_callback_path(clp, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

	if (task->tk_status)
		nfsd4_mark_cb_down(clp, task->tk_status);
	else
		clp->cl_cb_state = NFSD4_CB_UP;
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	/* XXX: release method to ensure we set the cb channel down if
	 * necessary on early failure? */
	.rpc_call_done = nfsd4_cb_probe_done,
};

static struct workqueue_struct *callback_wq;

static void run_nfsd4_cb(struct nfsd4_callback *cb)
{
	queue_work(callback_wq, &cb->cb_work);
}

static void do_probe_callback(struct nfs4_client *clp)
{
	struct nfsd4_callback *cb = &clp->cl_cb_null;

	cb->cb_op = NULL;
	cb->cb_clp = clp;

	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
	cb->cb_msg.rpc_argp = NULL;
	cb->cb_msg.rpc_resp = NULL;

	cb->cb_ops = &nfsd4_cb_probe_ops;

	run_nfsd4_cb(cb);
}

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
	do_probe_callback(clp);
}

void nfsd4_probe_callback_sync(struct nfs4_client *clp)
{
	nfsd4_probe_callback(clp);
	flush_workqueue(callback_wq);
}

void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	spin_lock(&clp->cl_lock);
	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, then mark it busy.  Otherwise, set the
 * thread for sleeping on the callback RPC wait queue.
 */
static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
{
	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		dprintk("%s slot is busy\n", __func__);
		return false;
	}
	return true;
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_stid.sc_client;
	u32 minorversion = clp->cl_minorversion;

	cb->cb_minorversion = minorversion;
	if (minorversion) {
		if (!nfsd41_cb_get_slot(clp, task))
			return;
	}
	spin_lock(&clp->cl_lock);
	if (list_empty(&cb->cb_per_client)) {
		/* This is the first call, not a restart */
		cb->cb_done = false;
		list_add(&cb->cb_per_client, &clp->cl_callbacks);
	}
	spin_unlock(&clp->cl_lock);
	rpc_call_start(task);
}

static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	dprintk("%s: minorversion=%d\n", __func__,
		clp->cl_minorversion);

	if (clp->cl_minorversion) {
		/* No need for lock, access serialized in nfsd4_cb_prepare */
		++clp->cl_cb_session->se_cb_seq_nr;
		clear_bit(0, &clp->cl_cb_slot_busy);
		rpc_wake_up_next(&clp->cl_cb_waitq);
		dprintk("%s: freed slot, new seqid=%d\n", __func__,
			clp->cl_cb_session->se_cb_seq_nr);

		/* We're done looking into the sequence information */
		task->tk_msg.rpc_resp = NULL;
	}
}


static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_stid.sc_client;
	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

	nfsd4_cb_done(task, calldata);

	if (current_rpc_client != task->tk_client) {
		/* We're shutting down or changing cl_cb_client; leave
		 * it to nfsd4_process_cb_update to restart the call if
		 * necessary. */
		return;
	}

	if (cb->cb_done)
		return;
	switch (task->tk_status) {
	case 0:
		cb->cb_done = true;
		return;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* Network partition? */
		nfsd4_mark_cb_down(clp, task->tk_status);
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
		return;
	}
	nfsd4_mark_cb_down(clp, task->tk_status);
	cb->cb_done = true;
}

static void nfsd4_cb_recall_release(void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_client *clp = cb->cb_clp;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);

	if (cb->cb_done) {
		spin_lock(&clp->cl_lock);
		list_del(&cb->cb_per_client);
		spin_unlock(&clp->cl_lock);
		nfs4_put_delegation(dp);
	}
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
	.rpc_call_prepare = nfsd4_cb_prepare,
	.rpc_call_done = nfsd4_cb_recall_done,
	.rpc_release = nfsd4_cb_recall_release,
};

int nfsd4_create_callback_queue(void)
{
	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
	if (!callback_wq)
		return -ENOMEM;
	return 0;
}

void nfsd4_destroy_callback_queue(void)
{
	destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
	/*
	 * Note this won't actually result in a null callback;
	 * instead, nfsd4_do_callback_rpc() will detect the killed
	 * client, destroy the rpc client, and stop:
	 */
	do_probe_callback(clp);
	flush_workqueue(callback_wq);
}

static void nfsd4_release_cb(struct nfsd4_callback *cb)
{
	if (cb->cb_ops->rpc_release)
		cb->cb_ops->rpc_release(cb);
}

/* requires cl_lock: */
static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
{
	struct nfsd4_session *s;
	struct nfsd4_conn *c;

	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
		list_for_each_entry(c, &s->se_conns, cn_persession) {
			if (c->cn_flags & NFS4_CDFC4_BACK)
				return c;
		}
	}
	return NULL;
}

static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
	struct nfs4_cb_conn conn;
	struct nfs4_client *clp = cb->cb_clp;
	struct nfsd4_session *ses = NULL;
	struct nfsd4_conn *c;
	int err;

	/*
	 * This is either an update, or the client dying; in either case,
	 * kill the old client:
	 */
	if (clp->cl_cb_client) {
		rpc_shutdown_client(clp->cl_cb_client);
		clp->cl_cb_client = NULL;
		put_rpccred(clp->cl_cb_cred);
		clp->cl_cb_cred = NULL;
	}
	if (clp->cl_cb_conn.cb_xprt) {
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
		clp->cl_cb_conn.cb_xprt = NULL;
	}
	if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
		return;
	spin_lock(&clp->cl_lock);
	/*
	 * Only serialized callback code is allowed to clear these
	 * flags; main nfsd code can only set them:
	 */
	BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
	c = __nfsd4_find_backchannel(clp);
	if (c) {
		svc_xprt_get(c->cn_xprt);
		conn.cb_xprt = c->cn_xprt;
		ses = c->cn_session;
	}
	spin_unlock(&clp->cl_lock);

	err = setup_callback_client(clp, &conn, ses);
	if (err) {
		nfsd4_mark_cb_down(clp, err);
		return;
	}
	/* Yay, the callback channel's back! Restart any callbacks: */
	list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
		run_nfsd4_cb(cb);
}

static void nfsd4_do_callback_rpc(struct work_struct *w)
{
	struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
	struct nfs4_client *clp = cb->cb_clp;
	struct rpc_clnt *clnt;

	if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
		nfsd4_process_cb_update(cb);

	clnt = clp->cl_cb_client;
	if (!clnt) {
		/* Callback channel broken, or client killed; give up: */
		nfsd4_release_cb(cb);
		return;
	}
	cb->cb_msg.rpc_cred = clp->cl_cb_cred;
	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
			cb->cb_ops, cb);
}
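
/*
 * In outline, the dispatch path looks like this: nfsd4_cb_recall() (or
 * do_probe_callback()) fills in cb->cb_msg and cb->cb_ops and calls
 * run_nfsd4_cb(), which queues cb->cb_work on callback_wq.  The work
 * item, nfsd4_do_callback_rpc(), first applies any pending connection
 * update via nfsd4_process_cb_update() and then fires the RPC
 * asynchronously; completion is handled by the rpc_call_ops installed
 * in cb->cb_ops (for example nfsd4_cb_recall_done()).
 */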

void nfsd4_init_callback(struct nfsd4_callback *cb)
{
	INIT_WORK(&cb->cb_work, nfsd4_do_callback_rpc);
}

void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfsd4_callback *cb = &dp->dl_recall;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	dp->dl_retries = 1;
	cb->cb_op = dp;
	cb->cb_clp = clp;
	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
	cb->cb_msg.rpc_argp = cb;
	cb->cb_msg.rpc_resp = cb;

	cb->cb_ops = &nfsd4_cb_recall_ops;

	INIT_LIST_HEAD(&cb->cb_per_client);
	cb->cb_done = true;

	run_nfsd4_cb(&dp->dl_recall);
}