blob: 4f359d2a26ebe3ce4a2160758c2a5e6c69163ba6 [file] [log] [blame]
/*
2 * Device operations for the pnfs client.
3 *
4 * Copyright (c) 2002
5 * The Regents of the University of Michigan
6 * All Rights Reserved
7 *
8 * Dean Hildebrand <dhildebz@umich.edu>
9 * Garth Goodson <Garth.Goodson@netapp.com>
10 *
11 * Permission is granted to use, copy, create derivative works, and
12 * redistribute this software and such derivative works for any purpose,
13 * so long as the name of the University of Michigan is not used in
14 * any advertising or publicity pertaining to the use or distribution
15 * of this software without specific, written prior authorization. If
16 * the above copyright notice or any other identification of the
17 * University of Michigan is included in any copy of any portion of
18 * this software, then the disclaimer below must also be included.
19 *
20 * This software is provided as is, without representation or warranty
21 * of any kind either express or implied, including without limitation
22 * the implied warranties of merchantability, fitness for a particular
23 * purpose, or noninfringement. The Regents of the University of
24 * Michigan shall not be liable for any damages, including special,
25 * indirect, incidental, or consequential damages, with respect to any
26 * claim arising out of or in connection with the use of the software,
27 * even if it has been or is hereafter advised of the possibility of
28 * such damages.
29 */
30
Paul Gortmakerafeacc82011-05-26 16:00:52 -040031#include <linux/export.h>
Benny Halevya1eaecb2011-05-19 22:14:47 -040032#include "pnfs.h"
33
34#define NFSDBG_FACILITY NFSDBG_PNFS
35
36/*
37 * Device ID RCU cache. A device ID is unique per server and layout type.
38 */
39#define NFS4_DEVICE_ID_HASH_BITS 5
40#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
41#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
42
43static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
44static DEFINE_SPINLOCK(nfs4_deviceid_lock);
45
/*
 * Dump a device ID to the debug log as four 32-bit hex words.
 */
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	/* NOTE(review): casts away const and reinterprets the deviceid as
	 * u32 words — assumes the id is at least 16 bytes and suitably
	 * aligned; confirm against NFS4_DEVICEID4_SIZE. */
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
55
56static inline u32
57nfs4_deviceid_hash(const struct nfs4_deviceid *id)
58{
59 unsigned char *cptr = (unsigned char *)id->data;
60 unsigned int nbytes = NFS4_DEVICEID4_SIZE;
61 u32 x = 0;
62
63 while (nbytes--) {
64 x *= 37;
65 x += *cptr++;
66 }
67 return x & NFS4_DEVICE_ID_HASH_MASK;
68}
69
/*
 * Core cache lookup: scan one hash bucket for a node matching both the
 * layout driver and the (client, deviceid) pair.
 *
 * Caller must hold rcu_read_lock() (all callers in this file do).
 * Entries whose refcount has already dropped to zero are skipped:
 * they are logically deleted and only awaiting teardown.
 *
 * Returns the matching live node, or NULL if none is in the bucket.
 */
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			/* zero refcount == being torn down; treat as absent */
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}
88
/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @ld   layout driver the deviceid belongs to
 * @clp  nfs_client associated with deviceid
 * @id   deviceid to look up
 * @hash precomputed hash bucket for @id
 *
 * Returns the referenced node, or NULL if not found.
 */
struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		   const struct nfs_client *clp, const struct nfs4_deviceid *id,
		   long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, hash);
	/* Take the reference while still inside the RCU read section so
	 * the node cannot be freed between lookup and atomic_inc(). */
	if (d != NULL)
		atomic_inc(&d->ref);
	rcu_read_unlock();
	return d;
}
109
/*
 * Look up a deviceid in the cache, taking a reference on the node if
 * it is found.  Returns NULL when no live entry matches.
 */
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		       const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	long hash = nfs4_deviceid_hash(id);

	return _find_get_deviceid(ld, clp, id, hash);
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
117
/*
 * Remove a deviceid from cache
 *
 * @ld  layout driver owning the deviceid
 * @clp nfs_client associated with deviceid
 * @id  the deviceid to unhash
 *
 * If found, the node is unhashed and, after an RCU grace period, the
 * cache's initial reference is dropped; the node is freed if that was
 * the last reference.  (No return value — the stale "@ret unhashed
 * node" wording from an earlier revision no longer applies.)
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	/* nfs4_deviceid_lock serializes against concurrent insert/purge */
	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	/* wait for concurrent RCU readers still traversing the bucket */
	synchronize_rcu();

	/* balance the initial ref set in pnfs_insert_deviceid */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
149
Benny Halevya1eaecb2011-05-19 22:14:47 -0400150void
151nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
Benny Halevy1775bc32011-05-20 13:47:33 +0200152 const struct pnfs_layoutdriver_type *ld,
Benny Halevya1eaecb2011-05-19 22:14:47 -0400153 const struct nfs_client *nfs_client,
154 const struct nfs4_deviceid *id)
155{
Benny Halevy1775bc32011-05-20 13:47:33 +0200156 INIT_HLIST_NODE(&d->node);
Weston Andros Adamson9e3bd4e2011-05-31 21:46:50 -0400157 INIT_HLIST_NODE(&d->tmpnode);
Benny Halevy1775bc32011-05-20 13:47:33 +0200158 d->ld = ld;
Benny Halevya1eaecb2011-05-19 22:14:47 -0400159 d->nfs_client = nfs_client;
Andy Adamsonc47abcf2011-06-15 17:52:40 -0400160 d->flags = 0;
Benny Halevya1eaecb2011-05-19 22:14:47 -0400161 d->deviceid = *id;
Benny Halevy1775bc32011-05-20 13:47:33 +0200162 atomic_set(&d->ref, 1);
Benny Halevya1eaecb2011-05-19 22:14:47 -0400163}
164EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);
165
/*
 * Uniquely initialize and insert a deviceid node into cache
 *
 * @new new deviceid node
 * Note that the caller must set up the following members:
 *  new->ld
 *  new->nfs_client
 *  new->deviceid
 *
 * @ret the inserted node, if none found, otherwise, the found entry.
 */
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
	struct nfs4_deviceid_node *d;
	long hash;

	/* the spinlock serializes inserters, guaranteeing at most one
	 * hashed node per (ld, client, deviceid) triple */
	spin_lock(&nfs4_deviceid_lock);
	hash = nfs4_deviceid_hash(&new->deviceid);
	d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
	if (d) {
		/* duplicate already cached: return it (reference already
		 * taken by _find_get_deviceid); @new stays with caller */
		spin_unlock(&nfs4_deviceid_lock);
		return d;
	}

	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	spin_unlock(&nfs4_deviceid_lock);
	/* reference for the caller, on top of the cache's initial ref */
	atomic_inc(&new->ref);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
198
199/*
200 * Dereference a deviceid node and delete it when its reference count drops
201 * to zero.
202 *
203 * @d deviceid node to put
204 *
Trond Myklebust47cb4982011-06-14 12:18:11 -0400205 * return true iff the node was deleted
206 * Note that since the test for d->ref == 0 is sufficient to establish
207 * that the node is no longer hashed in the global device id cache.
Benny Halevya1eaecb2011-05-19 22:14:47 -0400208 */
209bool
210nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
211{
Trond Myklebust47cb4982011-06-14 12:18:11 -0400212 if (!atomic_dec_and_test(&d->ref))
Benny Halevya1eaecb2011-05-19 22:14:47 -0400213 return false;
Benny Halevy1775bc32011-05-20 13:47:33 +0200214 d->ld->free_deviceid_node(d);
Benny Halevya1eaecb2011-05-19 22:14:47 -0400215 return true;
216}
217EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
Benny Halevy1775bc32011-05-20 13:47:33 +0200218
/*
 * Unhash and release every live deviceid node belonging to @clp in one
 * hash bucket.
 *
 * Two-phase teardown: first unhash matching nodes under the spinlock,
 * collecting them on a private list via ->tmpnode; then wait one RCU
 * grace period before dropping the cache's initial reference on each.
 */
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			/* ->tmpnode links the node onto the local list
			 * without disturbing RCU readers of ->node */
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	/* let concurrent RCU readers finish before dropping refs */
	synchronize_rcu();
	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}
247
248void
249nfs4_deviceid_purge_client(const struct nfs_client *clp)
250{
251 long h;
252
Weston Andros Adamson9e3bd4e2011-05-31 21:46:50 -0400253 if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
254 return;
Benny Halevy1775bc32011-05-20 13:47:33 +0200255 for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
256 _deviceid_purge_client(clp, h);
Benny Halevy1775bc32011-05-20 13:47:33 +0200257}
Andy Adamsonc47abcf2011-06-15 17:52:40 -0400258
259/*
260 * Stop use of all deviceids associated with an nfs_client
261 */
262void
263nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
264{
265 struct nfs4_deviceid_node *d;
266 struct hlist_node *n;
267 int i;
268
269 rcu_read_lock();
270 for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
271 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
272 if (d->nfs_client == clp)
273 set_bit(NFS_DEVICEID_INVALID, &d->flags);
274 }
275 rcu_read_unlock();
276}