/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

static u32 head_hashfn(struct rhashtable *ht,
			const struct bucket_table *tbl,
			const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		rhashtable_rehash_chain(ht, old_hash);
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = ht->elasticity;
	pprev = &tbl->buckets[hash];
	rht_for_each(head, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
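
/*
 * A minimal walker usage sketch (illustrative only, not part of the
 * original file; "my_ht" and "struct my_obj" are hypothetical). The RCU
 * read lock is held between rhashtable_walk_start() and
 * rhashtable_walk_stop(), so the loop body must not sleep:
 *
 *	struct my_obj { int key; struct rhash_head node; };
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);	// -EAGAIN here just means a rewind
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	// resized; walk rewound
 *			break;
 *		}
 *		pr_info("key=%d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */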

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
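
/*
 * A minimal init/insert/lookup sketch for the fixed-length-key
 * configuration from the kernel-doc above (illustrative caller code,
 * assuming the rhashtable_insert_fast()/rhashtable_lookup_fast()
 * helpers declared in <linux/rhashtable.h>; "my_ht" is hypothetical):
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *
 *	struct rhashtable my_ht;
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	int key = 1, err;
 *
 *	err = rhashtable_init(&my_ht, &test_params);	// hashfn defaults to jhash
 *	obj->key = key;
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, test_params);
 *	obj = rhashtable_lookup_fast(&my_ht, &key, test_params);
 */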

/**
 * rhltable_init - initialize a new hash list table
 * @hlt: hash list table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
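
/*
 * Sketch of how an rhltable differs from a plain rhashtable: objects
 * sharing a key hang off one bucket entry via struct rhlist_head, and
 * duplicate keys are accepted (illustrative caller code, assuming the
 * rhltable_insert()/rhltable_remove() helpers from <linux/rhashtable.h>;
 * "my_hlt", "my_params" and "struct my_entry" are hypothetical):
 *
 *	struct my_entry { int key; struct rhlist_head list_node; };
 *	struct rhltable my_hlt;
 *	struct my_entry *e;
 *	int err;
 *
 *	err = rhltable_init(&my_hlt, &my_params);
 *	err = rhltable_insert(&my_hlt, &e->list_node, my_params);  // dups OK
 *	err = rhltable_remove(&my_hlt, &e->list_node, my_params);
 */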

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
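
/*
 * A minimal teardown sketch (illustrative caller code): free_fn is
 * invoked for every element still in the table, so a table whose
 * objects were allocated with kzalloc() can be emptied and destroyed
 * in one call:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, free_obj, NULL);
 */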

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);