/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 * Lookups are lock-free under rcu_read_lock(); insertions and removals
 * are serialized by nfs4_deviceid_lock.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

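/*
 * Debugging helper: dump the 128-bit device ID as four 32-bit words.
 */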
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);

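/*
 * Simple multiplicative hash over the raw device ID bytes
 * (x = x * 37 + byte), reduced to a bucket index with the hash mask.
 */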
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

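/*
 * Walk one hash bucket looking for a matching layout driver, nfs_client
 * and device ID.  Entries whose reference count has already dropped to
 * zero are on their way out of the cache and are skipped.  Caller must
 * hold rcu_read_lock().
 */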
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

/*
 * Look up a deviceid in the cache and take a reference on it if found
 *
 * @ld	 layout driver that owns the deviceid
 * @clp	 nfs_client associated with the deviceid
 * @id	 deviceid to look up
 * @hash precomputed hash of @id
 */
struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		   const struct nfs_client *clp, const struct nfs4_deviceid *id,
		   long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, hash);
	if (d != NULL)
		atomic_inc(&d->ref);
	rcu_read_unlock();
	return d;
}

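/*
 * Convenience wrapper that computes the hash for @id and looks it up in
 * the cache, taking a reference on the node if found.
 */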
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		       const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from the cache
 *
 * @ld  layout driver that owns the deviceid
 * @clp nfs_client associated with the deviceid
 * @id  the deviceid to unhash
 *
 * The node is unhashed and, once the cache's reference is dropped and the
 * count reaches zero, freed via the layout driver's free_deviceid_node().
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	synchronize_rcu();

	/* balance the reference taken in nfs4_insert_deviceid_node */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);

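/*
 * Initialize a deviceid node, typically embedded in a layout driver's
 * own device structure.  The node starts out with a single reference
 * held by the caller and is not yet visible in the cache.
 */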
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
			const struct pnfs_layoutdriver_type *ld,
			const struct nfs_client *nfs_client,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = ld;
	d->nfs_client = nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Insert a deviceid node into the cache unless a matching entry already
 * exists
 *
 * @new new deviceid node
 * Note that the caller must set up the following members
 * (normally via nfs4_init_deviceid_node()):
 * new->ld
 * new->nfs_client
 * new->deviceid
 *
 * @ret @new if no matching entry was found, otherwise the entry already
 * in the cache, returned with a reference held for the caller.
 */
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
	struct nfs4_deviceid_node *d;
	long hash;

	spin_lock(&nfs4_deviceid_lock);
	hash = nfs4_deviceid_hash(&new->deviceid);
	d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		return d;
	}

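	/*
	 * No matching entry: publish the new node under RCU.  The extra
	 * reference taken here belongs to the hash table and is dropped
	 * when the node is unhashed again (nfs4_delete_deviceid or
	 * _deviceid_purge_client).
	 */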
	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	spin_unlock(&nfs4_deviceid_lock);
	atomic_inc(&new->ref);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
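
/*
 * Typical layout-driver usage (an illustrative sketch, not lifted from
 * any in-tree driver; "struct foo_device" stands in for whatever driver
 * structure embeds the deviceid node):
 *
 *	struct foo_device {
 *		...
 *		struct nfs4_deviceid_node node;
 *	};
 *
 *	nfs4_init_deviceid_node(&new->node, ld, clp, id);
 *	d = nfs4_insert_deviceid_node(&new->node);
 *
 * If the returned node differs from &new->node, the device ID was
 * already cached (or another thread raced us); the cached node comes
 * back with a reference held for the caller, so the caller drops its
 * own copy with nfs4_put_deviceid_node(&new->node) and uses the
 * container of the returned node instead.
 */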

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref reaching zero is sufficient to establish
 * that the node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

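/*
 * Purge one hash bucket of all deviceids belonging to @clp: unhash the
 * matching entries onto a temporary list under the lock, wait for an RCU
 * grace period so concurrent lookups drop out, then release the hash
 * table's reference and free any node whose count reaches zero.
 */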
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	synchronize_rcu();
	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}

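/*
 * Purge all cached deviceids associated with @clp.  Only relevant for
 * clients that negotiated pNFS metadata server support at EXCHANGE_ID
 * time (EXCHGID4_FLAG_USE_PNFS_MDS); anything else returns early.
 */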
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}