/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"

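/*
 * Allocation helpers: an nfs_delegation is allocated when the server hands
 * out a delegation and freed again once it has been returned.  Freeing also
 * drops the reference on the credential stored in the delegation.
 */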
static struct nfs_delegation *nfs_alloc_delegation(void)
{
	/* Zero the structure so fields such as ->flags start out clear */
	return kzalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

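/*
 * Reclaim any POSIX and flock locks that were held under the delegation for
 * the given open context, so that they are backed by real lock state on the
 * server before the delegation is handed back.
 */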
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

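/*
 * Convert every delegated open on the inode back into regular open state by
 * calling nfs4_open_delegation_recall() (and reclaiming locks) for each open
 * context.  Called with the client's cl_sem and the inode's rwsem held; the
 * i_lock is dropped around the recall RPCs and the scan restarts afterwards.
 */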
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Refresh an existing delegation that has just been reclaimed from the server
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	put_rpccred(delegation->cred);	/* drop the reference to the old credential */
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	/* Ensure we first revalidate the attributes and page cache! */
	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	delegation = nfs_alloc_delegation();
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	/* If the delegation was not installed above, release it (and its cred) */
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

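/*
 * Hand the delegation back to the server with a DELEGRETURN and release the
 * local nfs_delegation structure.
 */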
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated to a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

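/*
 * Kernel thread body: return every delegation held by the client once the
 * lease has been flagged as expired.  Nothing is done while state recovery
 * is in progress or if the lease has not actually expired.  The list is
 * rescanned from the top after each return because cl_lock is dropped
 * around the RPC.
 */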
int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs4_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	spin_lock(&clp->cl_lock);
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
out:
	spin_unlock(&clp->cl_lock);
	nfs4_put_client(clp);
	module_put_and_exit(0);
}

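/*
 * Spawn the delegreturn thread above.  References on the module and on the
 * client are taken up front and are dropped here only if the thread could
 * not be started; otherwise the thread releases them when it exits.
 */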
void nfs_expire_all_delegations(struct nfs4_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr));
	if (!IS_ERR(task))
		return;
	nfs4_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

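/*
 * Arguments handed to the delegation recall thread.  The caller waits on
 * 'started' until the thread has detached the delegation and reported its
 * result, so the structure can safely live on the caller's stack.
 */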
struct recall_threadargs {
	struct inode *inode;
	struct nfs4_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

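/*
 * Thread body for an asynchronous delegation recall: detach the delegation
 * matching the recalled stateid, signal the waiter, reclaim opens and locks,
 * then send the DELEGRETURN.
 */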
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);
	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}

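/*
 * Copy the delegation stateid into 'dst' if the inode currently holds a
 * delegation.  Returns 1 if a stateid was copied, 0 otherwise.
 */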
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	if (nfsi->delegation_state == 0)
		return 0;
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		res = 1;
	}
	spin_unlock(&clp->cl_lock);
	return res;
}