/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PNFS

struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}

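/*
 * Allocate a device ID to fsid mapping for the export backing @fhp and
 * hash it for lookup by nfsd4_find_devid_map().  If another thread raced
 * us, or an export with the same fsid already owns a mapping, reuse the
 * existing entry instead.
 */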
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}

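/*
 * Fill in a device ID for the export backing @fhp, setting up the fsid
 * mapping on first use.  Returns -ENOMEM if the mapping cannot be
 * allocated.
 */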
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}

void nfsd4_setup_layout_type(struct svc_export *exp)
{
	if (exp->ex_flags & NFSEXP_NOPNFS)
		return;
}

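/*
 * sc_free callback for layout stateids: unlink the stateid from its
 * client and file, drop the layout lease on the backing file, and free
 * the stateid itself.
 */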
static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
	fput(ls->ls_file);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

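/*
 * Install an FL_LAYOUT lease on the backing file so that conflicting
 * local access breaks the lease and recalls outstanding layouts via
 * nfsd4_layout_lm_break().
 */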
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lock *fl;
	int status;

	fl = locks_alloc_lock();
	if (!fl)
		return -ENOMEM;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->fl_flags = FL_LAYOUT;
	fl->fl_type = F_RDLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = ls;
	fl->fl_pid = current->tgid;
	fl->fl_file = ls->ls_file;

	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL);
	if (status) {
		locks_free_lock(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

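/*
 * Promote an open, lock or delegation stateid into a new layout stateid:
 * pin the backing file, install the layout lease and hash the stateid on
 * the per-client and per-file lists.
 */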
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
	if (!stp)
		return NULL;
	stp->sc_free = nfsd4_free_layout_stateid;
	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == NFS4_DELEG_STID)
		ls->ls_file = get_file(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = NFS4_LAYOUT_STID;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

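/*
 * Look up the layout stateid presented in a layout operation.  With
 * @create set an open, lock or delegation stateid is accepted as well
 * and converted into a fresh layout stateid.  On success *@lsp holds a
 * reference that the caller must put with nfs4_put_stid().
 */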
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned char typemask = NFS4_LAYOUT_STID;
	__be32 status;

	if (create)
		typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != NFS4_LAYOUT_STID) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		if (stateid->si_generation > stid->sc_stateid.si_generation)
			goto out_put_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_put_stid;
	}

	*lsp = ls;
	return 0;

out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}

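/*
 * Start a CB_LAYOUTRECALL for this stateid unless one is already
 * outstanding.  The stateid is bumped and saved in ls_recall_sid so the
 * callback carries the updated sequence number.
 */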
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	trace_layout_recall(&ls->ls_stid.sc_stateid);

	atomic_inc(&ls->ls_stid.sc_count);
	update_stateid(&ls->ls_stid.sc_stateid);
	memcpy(&ls->ls_recall_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
	nfsd4_run_cb(&ls->ls_recall);

out_unlock:
	spin_unlock(&ls->ls_lock);
}

static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

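/*
 * Merge @new into @lo if the two segments have the same iomode and
 * overlap or touch, extending @lo to cover the union of both ranges.
 */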
static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}

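/*
 * Recall the layouts held by every other stateid on this file and tell
 * the caller to return NFS4ERR_RECALLCONFLICT.  Must be called with
 * fi_lock held.
 */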
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}

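/*
 * Insert the segment granted by LAYOUTGET into the layout stateid,
 * merging it into an existing segment where possible.  The merge is
 * attempted twice because the locks must be dropped to allocate a new
 * nfs4_layout, and other segments may be inserted in the meantime.
 */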
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	atomic_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	update_stateid(&ls->ls_stid.sc_stateid);
	memcpy(&lgp->lg_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}

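/*
 * Trim a layout segment by the range being returned: a return covering
 * the whole segment moves it to @reaplist, while a return that would
 * split the segment in two is not supported and leaves it untouched.
 */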
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		/* the returned range covers our head: advance the offset. */
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		/* the returned range covers our tail: cap the end. */
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

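/*
 * Handle LAYOUTRETURN for a single file: trim or reap every segment
 * overlapping the returned range, and unhash the stateid once its last
 * segment is gone.
 */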
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found) {
			update_stateid(&ls->ls_stid.sc_stateid);
			memcpy(&lrp->lr_sid, &ls->ls_stid.sc_stateid,
				sizeof(stateid_t));
		}
		lrp->lrs_present = 1;
	} else {
		trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		nfs4_unhash_stid(&ls->ls_stid);
		lrp->lrs_present = 0;
	}
	spin_unlock(&ls->ls_lock);

	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

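/*
 * The client failed to respond to a layout recall, so fence it off from
 * the storage by invoking the /sbin/nfsd-recall-failed usermode helper
 * with the client address and the filesystem ID as arguments.
 */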
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	trace_layout_recall_fail(&ls->ls_stid.sc_stateid);

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"Fencing..\n", addr_str);

	argv[0] = "/sbin/nfsd-recall-failed";
	argv[1] = addr_str;
	argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

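/*
 * Completion handler for CB_LAYOUTRECALL: NOMATCHING_LAYOUT counts as
 * success, DELAY means poll the client again, and anything else fences
 * the unresponsive client.
 */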
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	case -NFS4ERR_DELAY:
		/* Poll the client until it's done with the layout */
		/* FIXME: cap the number of retries.
		 * The pNFS standard states that we need to only expire
		 * the client after at least "lease time" (e.g. lease-time * 2)
		 * when failing to communicate a recall.
		 */
		rpc_delay(task, HZ/100); /* 10 milliseconds */
		return 0;
	default:
		/*
		 * Unknown error or non-responding client, we'll need to fence.
		 */
		nfsd4_cb_layout_fail(ls);
		return -1;
	}
}

static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
};

static bool
nfsd4_layout_lm_break(struct file_lock *fl)
{
	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->fl_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lock *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lock_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = kmem_cache_create("nfs4_layout",
			sizeof(struct nfs4_layout), 0, 0, NULL);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid",
			sizeof(struct nfs4_layout_stateid), 0, 0, NULL);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}