/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/config.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"

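/* Allocate a delegation record; the caller is responsible for filling it in */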
static struct nfs_delegation *nfs_alloc_delegation(void)
{
	return kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

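/* Release the credential reference and free the delegation record */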
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

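/*
 * Reclaim the open state that was being protected by the delegation: for
 * each open context that still holds delegated state, ask the server to
 * turn it back into an ordinary open.
 */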
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		if (nfs4_open_delegation_recall(ctx->dentry, state) < 0) {
			/* drop the reference taken above before bailing out */
			put_nfs_open_context(ctx);
			return;
		}
		put_nfs_open_context(ctx);
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Reclaim an existing delegation on an inode
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	/* swap in the new credential and drop our reference to the old one */
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	delegation = nfs_alloc_delegation();
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;
	/* kmalloc() does not zero the structure, so clear the flags here */
	delegation->flags = 0;

	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	/* use nfs_free_delegation() so the credential reference is released too */
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

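/* Revalidate the inode, send DELEGRETURN to the server, then free the delegation */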
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated with a given superblock
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

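/* Arguments passed to the asynchronous delegation recall thread */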
struct recall_threadargs {
	struct inode *inode;
	struct nfs4_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

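/*
 * Worker thread for an asynchronous delegation recall: detach the matching
 * delegation, signal the caller via the completion, reclaim any delegated
 * open state and finally return the delegation to the server.
 */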
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}