/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1

/* Index of predefined Linux callback client operations */

enum {
        NFSPROC4_CLNT_CB_NULL = 0,
        NFSPROC4_CLNT_CB_RECALL,
        NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
        OP_CB_RECALL            = 4,
        OP_CB_SEQUENCE          = 11,
};

#define NFS4_MAXTAGLEN          20

#define NFS4_enc_cb_null_sz             0
#define NFS4_dec_cb_null_sz             0
#define cb_compound_enc_hdr_sz          4
#define cb_compound_dec_hdr_sz          (3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz                    (NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz              (sessionid_sz + 4 +             \
                                        1 /* no referring calls list yet */)
#define cb_sequence_dec_sz              (op_dec_sz + sessionid_sz + 4)

#define op_enc_sz                       1
#define op_dec_sz                       2
#define enc_nfs4_fh_sz                  (1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz                  (NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz           (cb_compound_enc_hdr_sz +       \
                                        cb_sequence_enc_sz +            \
                                        1 + enc_stateid_sz +            \
                                        enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz           (cb_compound_dec_hdr_sz +       \
                                        cb_sequence_dec_sz +            \
                                        op_dec_sz)

/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */
static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
        int tmp = XDR_QUADLEN(nbytes);
        if (!tmp)
                return p;
        p[tmp-1] = 0;
        memcpy(p, ptr, nbytes);
        return p + tmp;
}

#define WRITE32(n)               *p++ = htonl(n)
#define WRITEMEM(ptr, nbytes)    do {                           \
        p = xdr_writemem(p, ptr, nbytes);                       \
} while (0)
#define RESERVE_SPACE(nbytes)   do {                            \
        p = xdr_reserve_space(xdr, nbytes);                     \
        if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
        BUG_ON(!p);                                             \
} while (0)

/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL                             \
        status = 0;                             \
out:                                            \
        return status;                          \
xdr_error:                                      \
        dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
        status = -EIO;                          \
        goto out

#define READ32(x)         (x) = ntohl(*p++)
#define READ64(x)         do {                  \
        (x) = (u64)ntohl(*p++) << 32;           \
        (x) |= ntohl(*p++);                     \
} while (0)
#define READTIME(x)       do {                  \
        p++;                                    \
        (x.tv_sec) = ntohl(*p++);               \
        (x.tv_nsec) = ntohl(*p++);              \
} while (0)
#define READ_BUF(nbytes)  do { \
        p = xdr_inline_decode(xdr, nbytes); \
        if (!p) { \
                dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
                        __func__, __LINE__); \
                return -EIO; \
        } \
} while (0)

struct nfs4_cb_compound_hdr {
        /* args */
        u32             ident;  /* minorversion 0 only */
        u32             nops;
        __be32          *nops_p;
        u32             minorversion;
        /* res */
        int             status;
};

static struct {
        int stat;
        int errno;
} nfs_cb_errtbl[] = {
        { NFS4_OK,              0               },
        { NFS4ERR_PERM,         EPERM           },
        { NFS4ERR_NOENT,        ENOENT          },
        { NFS4ERR_IO,           EIO             },
        { NFS4ERR_NXIO,         ENXIO           },
        { NFS4ERR_ACCESS,       EACCES          },
        { NFS4ERR_EXIST,        EEXIST          },
        { NFS4ERR_XDEV,         EXDEV           },
        { NFS4ERR_NOTDIR,       ENOTDIR         },
        { NFS4ERR_ISDIR,        EISDIR          },
        { NFS4ERR_INVAL,        EINVAL          },
        { NFS4ERR_FBIG,         EFBIG           },
        { NFS4ERR_NOSPC,        ENOSPC          },
        { NFS4ERR_ROFS,         EROFS           },
        { NFS4ERR_MLINK,        EMLINK          },
        { NFS4ERR_NAMETOOLONG,  ENAMETOOLONG    },
        { NFS4ERR_NOTEMPTY,     ENOTEMPTY       },
        { NFS4ERR_DQUOT,        EDQUOT          },
        { NFS4ERR_STALE,        ESTALE          },
        { NFS4ERR_BADHANDLE,    EBADHANDLE      },
        { NFS4ERR_BAD_COOKIE,   EBADCOOKIE      },
        { NFS4ERR_NOTSUPP,      ENOTSUPP        },
        { NFS4ERR_TOOSMALL,     ETOOSMALL       },
        { NFS4ERR_SERVERFAULT,  ESERVERFAULT    },
        { NFS4ERR_BADTYPE,      EBADTYPE        },
        { NFS4ERR_LOCKED,       EAGAIN          },
        { NFS4ERR_RESOURCE,     EREMOTEIO       },
        { NFS4ERR_SYMLINK,      ELOOP           },
        { NFS4ERR_OP_ILLEGAL,   EOPNOTSUPP      },
        { NFS4ERR_DEADLOCK,     EDEADLK         },
        { -1,                   EIO             }
};

static int
nfs_cb_stat_to_errno(int stat)
{
        int i;
        for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
                if (nfs_cb_errtbl[i].stat == stat)
                        return nfs_cb_errtbl[i].errno;
        }
        /* If we cannot translate the error, the recovery routines should
         * handle it.
         * Note: remaining NFSv4 error codes have values > 10000, so should
         * not conflict with native Linux error codes.
         */
        return stat;
}

/*
 * XDR encode
 */

static void
encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
        __be32 *p;

        RESERVE_SPACE(sizeof(stateid_t));
        WRITE32(sid->si_generation);
        WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
}

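/*
 * Encode the CB_COMPOUND header: empty tag, minor version, callback
 * ident, and a placeholder operation count that encode_cb_nops()
 * patches once all operations have been encoded.
 */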
static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;

        RESERVE_SPACE(16);
        WRITE32(0);            /* tag length is always 0 */
        WRITE32(hdr->minorversion);
        WRITE32(hdr->ident);
        hdr->nops_p = p;
        WRITE32(hdr->nops);
}

static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
        *hdr->nops_p = htonl(hdr->nops);
}

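/*
 * Encode a CB_RECALL operation: the delegation stateid, a zero
 * "truncate" flag (the truncate optimization is not implemented), and
 * the file handle being recalled.
 */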
static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
                struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        int len = dp->dl_fh.fh_size;

        RESERVE_SPACE(4);
        WRITE32(OP_CB_RECALL);
        encode_stateid(xdr, &dp->dl_stateid);
        RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
        WRITE32(0); /* truncate optimization not implemented */
        WRITE32(len);
        WRITEMEM(&dp->dl_fh.fh_base, len);
        hdr->nops++;
}

static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
                   struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;

        if (hdr->minorversion == 0)
                return;

        RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

        WRITE32(OP_CB_SEQUENCE);
        WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
        WRITE32(ses->se_cb_seq_nr);
        WRITE32(0);            /* slotid, always 0 */
        WRITE32(0);            /* highest slotid always 0 */
        WRITE32(0);            /* cachethis always 0 */
        WRITE32(0); /* FIXME: support referring_call_lists */
        hdr->nops++;
}

static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
        struct xdr_stream xdrs, *xdr = &xdrs;

        xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
        RESERVE_SPACE(0);
        return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
                struct nfsd4_callback *cb)
{
        struct xdr_stream xdr;
        struct nfs4_delegation *args = cb->cb_op;
        struct nfs4_cb_compound_hdr hdr = {
                .ident = cb->cb_clp->cl_cb_ident,
                .minorversion = cb->cb_minorversion,
        };

        xdr_init_encode(&xdr, &req->rq_snd_buf, p);
        encode_cb_compound_hdr(&xdr, &hdr);
        encode_cb_sequence(&xdr, cb, &hdr);
        encode_cb_recall(&xdr, args, &hdr);
        encode_cb_nops(&hdr);
        return 0;
}

static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
        __be32 *p;
        u32 taglen;

        READ_BUF(8);
        READ32(hdr->status);
        /* We've got no use for the tag; ignore it: */
        READ32(taglen);
        READ_BUF(taglen + 4);
        p += XDR_QUADLEN(taglen);
        READ32(hdr->nops);
        return 0;
}

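/*
 * Decode one operation header from a CB_COMPOUND reply: check that the
 * server answered the operation we actually sent, and map a non-OK NFS
 * status to a negative local errno.
 */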
static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
        __be32 *p;
        u32 op;
        int32_t nfserr;

        READ_BUF(8);
        READ32(op);
        if (op != expected) {
                dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
                        "operation %d but we issued a request for %d\n",
                        op, expected);
                return -EIO;
        }
        READ32(nfserr);
        if (nfserr != NFS_OK)
                return -nfs_cb_stat_to_errno(nfserr);
        return 0;
}

/*
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
                   struct rpc_rqst *rqstp)
{
        struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
        struct nfs4_sessionid id;
        int status;
        u32 dummy;
        __be32 *p;

        if (cb->cb_minorversion == 0)
                return 0;

        status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
        if (status)
                return status;

        /*
         * If the server returns different values for sessionID, slotID or
         * sequence number, the server is looney tunes.
         */
        status = -ESERVERFAULT;

        READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
        memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
        p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
        if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
                dprintk("%s Invalid session id\n", __func__);
                goto out;
        }
        READ32(dummy);
        if (dummy != ses->se_cb_seq_nr) {
                dprintk("%s Invalid sequence number\n", __func__);
                goto out;
        }
        READ32(dummy);  /* slotid must be 0 */
        if (dummy != 0) {
                dprintk("%s Invalid slotid\n", __func__);
                goto out;
        }
        /* FIXME: process highest slotid and target highest slotid */
        status = 0;
out:
        return status;
}

static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
        return 0;
}

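/*
 * Top-level decoder for a CB_RECALL reply: the compound header, an
 * optional CB_SEQUENCE result (NFSv4.1 only), then the status of the
 * CB_RECALL operation itself.
 */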
static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
                struct nfsd4_callback *cb)
{
        struct xdr_stream xdr;
        struct nfs4_cb_compound_hdr hdr;
        int status;

        xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
        status = decode_cb_compound_hdr(&xdr, &hdr);
        if (status)
                goto out;
        if (cb) {
                status = decode_cb_sequence(&xdr, cb, rqstp);
                if (status)
                        goto out;
        }
        status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
        return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)                              \
[NFSPROC4_CLNT_##proc] = {                                              \
        .p_proc   = NFSPROC4_CB_##call,                                 \
        .p_encode = (kxdrproc_t) nfs4_xdr_##argtype,                    \
        .p_decode = (kxdrproc_t) nfs4_xdr_##restype,                    \
        .p_arglen = NFS4_##argtype##_sz,                                \
        .p_replen = NFS4_##restype##_sz,                                \
        .p_statidx = NFSPROC4_CB_##call,                                \
        .p_name   = #proc,                                              \
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
        PROC(CB_NULL,   NULL,     enc_cb_null,   dec_cb_null),
        PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall),
};

static struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
        .number                 = 1,
        .nrprocs                = ARRAY_SIZE(nfs4_cb_procedures),
        .procs                  = nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
        &nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
        .program                = &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
        .name                   = "nfs4_cb",
        .number                 = NFS4_CALLBACK,
        .nrvers                 = ARRAY_SIZE(nfs_cb_version),
        .version                = nfs_cb_version,
        .stats                  = &cb_stats,
        .pipe_dir_name          = "/nfsd4_cb",
};

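/*
 * Allow a callback RPC at most a tenth of the lease period (but at
 * least one second) before it times out.
 */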
static int max_cb_time(void)
{
        return max(nfsd4_lease/10, (time_t)1) * HZ;
}

/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */

int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
        struct rpc_timeout      timeparms = {
                .to_initval     = max_cb_time(),
                .to_retries     = 0,
        };
        struct rpc_create_args args = {
                .net            = &init_net,
                .address        = (struct sockaddr *) &conn->cb_addr,
                .addrsize       = conn->cb_addrlen,
                .timeout        = &timeparms,
                .program        = &cb_program,
                .version        = 0,
                .authflavor     = clp->cl_flavor,
                .flags          = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
        };
        struct rpc_clnt *client;

        if (clp->cl_minorversion == 0) {
                if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
                        return -EINVAL;
                args.client_name = clp->cl_principal;
                args.prognumber = conn->cb_prog;
                args.protocol = XPRT_TRANSPORT_TCP;
                clp->cl_cb_ident = conn->cb_ident;
        } else {
                args.bc_xprt = conn->cb_xprt;
                args.prognumber = clp->cl_cb_session->se_cb_prog;
                args.protocol = XPRT_TRANSPORT_BC_TCP;
        }
        /* Create RPC client */
        client = rpc_create(&args);
        if (IS_ERR(client)) {
                dprintk("NFSD: couldn't create callback client: %ld\n",
                        PTR_ERR(client));
                return PTR_ERR(client);
        }
        clp->cl_cb_client = client;
        return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
        dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
                (int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
        struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

        if (task->tk_status)
                warn_no_callback_path(clp, task->tk_status);
        else
                atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
        /* XXX: release method to ensure we set the cb channel down if
         * necessary on early failure? */
        .rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
        if (callback_cred)
                return 0;
        callback_cred = rpc_lookup_machine_cred();
        if (!callback_cred)
                return -ENOMEM;
        return 0;
}

static struct workqueue_struct *callback_wq;

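/*
 * Build a CB_NULL probe for this client and hand it to the callback
 * workqueue; nfsd4_cb_probe_done() marks the callback channel up if
 * the probe succeeds.
 */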
static void do_probe_callback(struct nfs4_client *clp)
{
        struct nfsd4_callback *cb = &clp->cl_cb_null;

        cb->cb_op = NULL;
        cb->cb_clp = clp;

        cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
        cb->cb_msg.rpc_argp = NULL;
        cb->cb_msg.rpc_resp = NULL;
        cb->cb_msg.rpc_cred = callback_cred;

        cb->cb_ops = &nfsd4_cb_probe_ops;

        queue_work(callback_wq, &cb->cb_work);
}

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
        set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
        do_probe_callback(clp);
}

void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
        BUG_ON(atomic_read(&clp->cl_cb_set));

        spin_lock(&clp->cl_lock);
        memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
        spin_unlock(&clp->cl_lock);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, mark it busy.  Otherwise, put the task to
 * sleep on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
                struct rpc_task *task)
{
        u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
        int status = 0;

        dprintk("%s: %u:%u:%u:%u\n", __func__,
                ptr[0], ptr[1], ptr[2], ptr[3]);

        if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
                rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
                dprintk("%s slot is busy\n", __func__);
                status = -EAGAIN;
                goto out;
        }
out:
        dprintk("%s status=%d\n", __func__, status);
        return status;
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
        struct nfs4_client *clp = dp->dl_client;
        u32 minorversion = clp->cl_minorversion;
        int status = 0;

        cb->cb_minorversion = minorversion;
        if (minorversion) {
                status = nfsd41_cb_setup_sequence(clp, task);
                if (status) {
                        if (status != -EAGAIN) {
                                /* terminate rpc task */
                                task->tk_status = status;
                                task->tk_action = NULL;
                        }
                        return;
                }
        }
        rpc_call_start(task);
}

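/*
 * Common completion processing for NFSv4.1 callbacks: bump the
 * backchannel sequence number, free the single slot, and wake up the
 * next task waiting for it.
 */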
static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
        struct nfs4_client *clp = dp->dl_client;

        dprintk("%s: minorversion=%d\n", __func__,
                clp->cl_minorversion);

        if (clp->cl_minorversion) {
                /* No need for lock, access serialized in nfsd4_cb_prepare */
                ++clp->cl_cb_session->se_cb_seq_nr;
                clear_bit(0, &clp->cl_cb_slot_busy);
                rpc_wake_up_next(&clp->cl_cb_waitq);
                dprintk("%s: freed slot, new seqid=%d\n", __func__,
                        clp->cl_cb_session->se_cb_seq_nr);

                /* We're done looking into the sequence information */
                task->tk_msg.rpc_resp = NULL;
        }
}

static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
        struct nfs4_client *clp = dp->dl_client;
        struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

        nfsd4_cb_done(task, calldata);

        if (current_rpc_client == NULL) {
                /* We're shutting down; give up. */
                /* XXX: err, or is it ok just to fall through
                 * and rpc_restart_call? */
                return;
        }

        switch (task->tk_status) {
        case 0:
                return;
        case -EBADHANDLE:
        case -NFS4ERR_BAD_STATEID:
                /* Race: client probably got cb_recall
                 * before open reply granting delegation */
                break;
        default:
                /* Network partition? */
                atomic_set(&clp->cl_cb_set, 0);
                warn_no_callback_path(clp, task->tk_status);
                if (current_rpc_client != task->tk_client) {
                        /* queue a callback on the new connection: */
                        atomic_inc(&dp->dl_count);
                        nfsd4_cb_recall(dp);
                        return;
                }
        }
        if (dp->dl_retries--) {
                rpc_delay(task, 2*HZ);
                task->tk_status = 0;
                rpc_restart_call_prepare(task);
                return;
        } else {
                atomic_set(&clp->cl_cb_set, 0);
                warn_no_callback_path(clp, task->tk_status);
        }
}

static void nfsd4_cb_recall_release(void *calldata)
{
        struct nfsd4_callback *cb = calldata;
        struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);

        nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
        .rpc_call_prepare = nfsd4_cb_prepare,
        .rpc_call_done = nfsd4_cb_recall_done,
        .rpc_release = nfsd4_cb_recall_release,
};

int nfsd4_create_callback_queue(void)
{
        callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
        if (!callback_wq)
                return -ENOMEM;
        return 0;
}

void nfsd4_destroy_callback_queue(void)
{
        destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
        set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
        /*
         * Note this won't actually result in a null callback;
         * instead, nfsd4_do_callback_rpc() will detect the killed
         * client, destroy the rpc client, and stop:
         */
        do_probe_callback(clp);
        flush_workqueue(callback_wq);
}

void nfsd4_release_cb(struct nfsd4_callback *cb)
{
        if (cb->cb_ops->rpc_release)
                cb->cb_ops->rpc_release(cb);
}

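/*
 * Pick up a pending callback-channel change: tear down the old rpc
 * client and, unless the client is being killed, snapshot the latest
 * connection parameters under cl_lock and build a new client from them.
 */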
void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
        struct nfs4_cb_conn conn;
        struct nfs4_client *clp = cb->cb_clp;
        int err;

        /*
         * This is either an update, or the client dying; in either case,
         * kill the old client:
         */
        if (clp->cl_cb_client) {
                rpc_shutdown_client(clp->cl_cb_client);
                clp->cl_cb_client = NULL;
        }
        if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
                return;
        spin_lock(&clp->cl_lock);
        /*
         * Only serialized callback code is allowed to clear these
         * flags; main nfsd code can only set them:
         */
        BUG_ON(!clp->cl_cb_flags);
        clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
        memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
        spin_unlock(&clp->cl_lock);

        err = setup_callback_client(clp, &conn);
        if (err)
                warn_no_callback_path(clp, err);
}

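/*
 * Workqueue handler for all callbacks: apply any pending connection
 * update, then send the callback asynchronously over the current
 * backchannel rpc client, or release the callback if there is none.
 */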
void nfsd4_do_callback_rpc(struct work_struct *w)
{
        struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
        struct nfs4_client *clp = cb->cb_clp;
        struct rpc_clnt *clnt;

        if (clp->cl_cb_flags)
                nfsd4_process_cb_update(cb);

        clnt = clp->cl_cb_client;
        if (!clnt) {
                /* Callback channel broken, or client killed; give up: */
                nfsd4_release_cb(cb);
                return;
        }
        rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
                        cb->cb_ops, cb);
}

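/*
 * Queue a CB_RECALL for this delegation on the callback workqueue;
 * nfsd4_do_callback_rpc() issues the actual RPC.
 */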
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
        struct nfsd4_callback *cb = &dp->dl_recall;

        dp->dl_retries = 1;
        cb->cb_op = dp;
        cb->cb_clp = dp->dl_client;
        cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
        cb->cb_msg.rpc_argp = cb;
        cb->cb_msg.rpc_resp = cb;
        cb->cb_msg.rpc_cred = callback_cred;

        cb->cb_ops = &nfsd4_cb_recall_ops;

        queue_work(callback_wq, &dp->dl_recall.cb_work);
}