/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *  Garth Goodson   <Garth.Goodson@netapp.com>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
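
/*
 * Lookups walk the hash chains under rcu_read_lock(); insertion and
 * removal serialize on nfs4_deviceid_lock, and removal waits for an RCU
 * grace period before the cache's reference on the node is dropped.
 * Each hashed node carries one reference for the hash table (taken at
 * insert time) in addition to the initial reference set up for the
 * caller by nfs4_init_deviceid_node().
 */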

void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

/*
 * Lookup a deviceid in cache and take a reference on it if found
 *
 * @ld   layout driver the deviceid belongs to
 * @clp  nfs_client associated with deviceid
 * @id   deviceid to look up
 * @hash precomputed hash bucket for @id
 */
struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		   const struct nfs_client *clp, const struct nfs4_deviceid *id,
		   long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, hash);
	if (d != NULL)
		atomic_inc(&d->ref);
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		       const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
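
/*
 * Example (illustrative sketch, not part of this file): a layout driver
 * typically resolves the device ID carried in a layout segment by
 * consulting this cache first and falling back to a GETDEVICEINFO call
 * on a miss.  A reference obtained here must later be released with
 * nfs4_put_deviceid_node().  The helper and variable names below are
 * hypothetical driver code:
 *
 *	struct nfs4_deviceid_node *node;
 *
 *	node = nfs4_find_get_deviceid(server->pnfs_curr_ld,
 *				      server->nfs_client, &lseg_devid);
 *	if (node == NULL)
 *		node = my_driver_fetch_deviceinfo(server, &lseg_devid);
 *	if (node == NULL)
 *		return -ENODEV;
 *	... use the device ...
 *	nfs4_put_deviceid_node(node);
 */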

/*
 * Remove a deviceid from cache
 *
 * @ld  layout driver the deviceid belongs to
 * @clp nfs_client associated with deviceid
 * @id  the deviceid to unhash
 *
 * If the deviceid is found, it is removed from the cache and the reference
 * held by the cache is dropped; the node is freed once its reference count
 * reaches zero.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	synchronize_rcu();

	/* balance the reference taken in nfs4_insert_deviceid_node */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
			const struct pnfs_layoutdriver_type *ld,
			const struct nfs_client *nfs_client,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = ld;
	d->nfs_client = nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Insert a deviceid node into the cache unless an entry for the same
 * device is already present
 *
 * @new new deviceid node
 *      Note that the caller must set up the following members
 *      (typically via nfs4_init_deviceid_node()):
 *        new->ld
 *        new->nfs_client
 *        new->deviceid
 *
 * @ret @new if no matching entry was found (the cache takes its own
 *      reference on it), otherwise the already cached node with an extra
 *      reference for the caller.
 */
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
	struct nfs4_deviceid_node *d;
	long hash;

	spin_lock(&nfs4_deviceid_lock);
	hash = nfs4_deviceid_hash(&new->deviceid);
	d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		return d;
	}

	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	spin_unlock(&nfs4_deviceid_lock);
	atomic_inc(&new->ref);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
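
/*
 * Example (illustrative sketch, not part of this file): on a cache miss a
 * layout driver embeds a struct nfs4_deviceid_node in its own device
 * structure, initializes it, and offers it to the cache.  Because another
 * thread may have inserted the same device ID in the meantime, the driver
 * must use the node that is returned and discard its own copy if the two
 * differ.  The structure and helper names below are hypothetical:
 *
 *	nfs4_init_deviceid_node(&new->id_node, server->pnfs_curr_ld,
 *				server->nfs_client, devid);
 *	node = nfs4_insert_deviceid_node(&new->id_node);
 *	if (node != &new->id_node) {
 *		my_driver_free_device(new);
 *		new = container_of(node, struct my_driver_device, id_node);
 *	}
 */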

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	synchronize_rcu();
	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}