/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

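/* Map an existing entry to its bucket in @tbl using the table's hash_rnd. */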
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

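/* Size and allocate the per-table bucket lock array.  The array is scaled
 * by CPU count and ht->p.locks_mul, rounded up to a power of two, and
 * capped at one lock per two buckets.
 */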
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

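/* Allocate a bucket table (and its lock array) for @nbuckets buckets.
 * Large GFP_KERNEL allocations fall back to vzalloc(); atomic callers
 * only get what kzalloc() can provide.
 */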
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

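/* Follow the future_tbl chain to its end and return the newest table. */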
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

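/* Move the tail entry of the old_hash chain over to its bucket in the
 * newest table, unlinking it from the old chain.  Returns -ENOENT once
 * the old chain is empty.
 */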
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

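/* Drain every entry out of the old_hash bucket, then bump old_tbl->rehash
 * so readers can tell how far the rehash has progressed.
 */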
static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

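/* Link @new_tbl as @old_tbl's future table so new insertions land in it.
 * Fails with -EEXIST if another resize got there first.
 */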
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

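/* Move all entries from ht->tbl into its future table, publish the new
 * table, detach any walkers from the old one and free it after a grace
 * period.  Returns -EAGAIN if yet another table is already queued up.
 */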
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

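/* Deferred work: grow or shrink the table as the load factor dictates,
 * then perform the actual rehash.  Reschedules itself if more tables
 * remain to be rehashed.
 */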
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

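/* Return true if the chain at @hash has reached ht->elasticity entries,
 * i.e. is suspiciously long, which may indicate a hash-collision attack.
 */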
static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

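/* Allocate and attach a new table from atomic context after an insertion
 * hit the elasticity or load limit.  The size is doubled above 75% load;
 * if the GFP_ATOMIC allocation fails, the deferred worker retries it in
 * process context.
 */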
int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to try allocation
		 * in non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

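/* Slow-path insertion: take the bucket lock on the newest table, re-check
 * for duplicates and table limits, then link the object in.  Returns
 * -EAGAIN when the caller should trigger a rehash and retry.
 */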
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

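/* Initial table size: 4/3 of the hinted element count, rounded up to a
 * power of two, but never below the configured minimum size.
 */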
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

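/* Adapter giving jhash2(), which hashes arrays of u32, the same signature
 * as the byte-oriented hash function used in struct rhashtable_params.
 */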
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);