/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

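/*
 * When a full bucket array cannot be allocated in one piece (e.g. a large
 * resize attempted with GFP_ATOMIC), the buckets are instead organised as
 * a tree of page-sized nested tables: interior slots point at the next
 * level, leaf slots hold the actual bucket heads.
 */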
union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		if (gfpflags_allow_blocking(gfp))
			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
		else
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

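/* Fall-back bucket table allocation: only the table head and the top-level
 * nested table are allocated here; lower levels are created on demand by
 * rht_bucket_nested_insert().
 */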
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						       size_t nbuckets,
						       gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	else
		tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

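/* Unhook the last entry in the @old_hash chain of the old table and move it
 * to the newest table.  Returns -ENOENT once the chain is empty, or -EAGAIN
 * if the destination table is nested and the move cannot be done here.
 */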
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

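/* Called from the insert slow path when a chain has run out of elasticity
 * or the table has exceeded 100% utilisation: allocate and attach a new
 * table with GFP_ATOMIC.  If that allocation fails, the rehash is handed
 * off to the deferred worker instead.
 */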
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

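/* Search one bucket for @key.  Returns the matching object, NULL when the
 * new object was chained onto an existing rhlist entry, ERR_PTR(-ENOENT)
 * when the key is absent, or ERR_PTR(-EAGAIN) when the chain exceeded the
 * elasticity limit and a rehash is required.
 */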
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

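/* Second half of the slow-path insert: act on the result of
 * rhashtable_lookup_one() and, if the key was absent, link @obj at the head
 * of its bucket.  Returns the future table when the insert has to be
 * retried there, NULL on success, or an ERR_PTR().
 */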
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
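
/*
 * Typical use of the walker API above, as an illustrative sketch only:
 * "struct test_obj" and process() are hypothetical caller-side names and
 * not part of this file.  A resize during the walk yields ERR_PTR(-EAGAIN)
 * from rhashtable_walk_next() and rewinds the iterator, so objects may be
 * seen twice; the caller must tolerate that.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		process(obj);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */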

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
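
/*
 * Minimal end-to-end usage sketch for a table initialised above.  This is
 * illustrative only: "struct test_obj", test_params, ht, obj and key are
 * hypothetical caller-side definitions, and the fast-path helpers
 * (rhashtable_insert_fast(), rhashtable_lookup_fast(),
 * rhashtable_remove_fast()) are the inline wrappers declared in
 * <linux/rhashtable.h>, not functions defined in this file:
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset  = offsetof(struct test_obj, key),
 *		.key_len     = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (!err)
 *		err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *
 *	found = rhashtable_lookup_fast(&ht, &key, test_params);
 *
 *	rhashtable_remove_fast(&ht, &obj->node, test_params);
 *	rhashtable_free_and_destroy(&ht, NULL, NULL);
 */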

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

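/* Resolve a hash to its bucket head inside a nested bucket table by walking
 * the page-sized levels.  If an intermediate level has not been allocated
 * the bucket is empty, and a pointer to a static NULLS marker is returned
 * instead.
 */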
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;

}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;

}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);