/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

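/* A cached flow, hashed per cpu by (net, family, dir, key).  An entry
 * is only trusted while its genid matches the per-netns generation
 * counter; see flow_entry_valid() below.
 */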
struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

struct flow_flush_info {
	struct flow_cache	*cache;
	atomic_t		cpuleft;
	struct completion	completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)	(1U << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

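/* Timer handler: mark every cpu's hash seed as stale so the next
 * lookup on that cpu picks a fresh one, then re-arm the timer.
 */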
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

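/* An entry is stale once the global genid has moved on, or once its
 * object fails the owner's ->check() hook.
 */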
static int flow_entry_valid(struct flow_cache_entry *fle,
				struct netns_xfrm *xfrm)
{
	if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

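/* Drop the entry's object reference (if any) and free the entry. */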
static void flow_entry_kill(struct flow_cache_entry *fle,
				struct netns_xfrm *xfrm)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

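/* Work item: splice the pending gc list out under the lock, then kill
 * the dead entries without holding it.
 */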
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_gc_work);

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&xfrm->flow_cache_gc_lock);
	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&xfrm->flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
		flow_entry_kill(fce, xfrm);
		atomic_dec(&xfrm->flow_cache_gc_count);
	}
}

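/* Hand a batch of unlinked entries to the gc work item, adjusting the
 * per-cpu hash count and the global gc count to match.
 */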
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     unsigned int deleted,
				     struct list_head *gc_list,
				     struct netns_xfrm *xfrm)
{
	if (deleted) {
		atomic_add(deleted, &xfrm->flow_cache_gc_count);
		fcp->hash_count -= deleted;
		spin_lock_bh(&xfrm->flow_cache_gc_lock);
		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
		spin_unlock_bh(&xfrm->flow_cache_gc_lock);
		schedule_work(&xfrm->flow_cache_gc_work);
	}
}

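/* Shrink one cpu's hash table, keeping at most shrink_to valid entries
 * per bucket; shrink_to == 0 empties the table completely.
 */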
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				unsigned int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	unsigned int deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);
	unsigned int i;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		unsigned int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle, xfrm)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	unsigned int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

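/* Replace this cpu's hash seed.  Old hash positions are meaningless
 * afterwards, so the table is emptied first.
 */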
static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

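/* Hash the key as an array of u32s under the per-cpu seed and fold
 * the result into the table size.
 */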
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  unsigned int keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    unsigned int keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

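/* Main lookup: find (or insert) the flow on this cpu's table.  On a
 * valid hit the cached object's ->get() reference is returned; on a
 * miss or stale entry the resolver runs (with BHs still disabled) and
 * its result is cached.  May return an ERR_PTR, e.g. -ENOBUFS when
 * too much garbage is pending.
 */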
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &net->xfrm.flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	unsigned int keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init? Making flow_cache_init a
	 * pre-smp initcall would solve this. --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		if (atomic_read(&net->xfrm.flow_cache_gc_count) >
		    2 * num_online_cpus() * fc->high_watermark) {
			flo = ERR_PTR(-ENOBUFS);
			goto ret_object;
		}

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

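/* Per-cpu half of a flush: queue every stale entry on this cpu for gc
 * and signal completion when the last cpu finishes.
 */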
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	unsigned int deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);
	unsigned int i;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle, xfrm))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp;
	unsigned int i;

	fcp = per_cpu_ptr(fc->percpu, cpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++)
		if (!hlist_empty(&fcp->hash_table[i]))
			return 0;
	return 1;
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

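/* Synchronously flush stale entries on every online cpu that holds
 * any, running the flush tasklet on each and waiting for all of them.
 */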
void flow_cache_flush(struct net *net)
{
	struct flow_flush_info info;
	cpumask_var_t mask;
	int i, self;

	/* Track which cpus need flushing to avoid disturbing all cores. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&net->xfrm.flow_flush_sem);
	info.cache = &net->xfrm.flow_cache_global;
	for_each_online_cpu(i)
		if (!flow_cache_percpu_empty(info.cache, i))
			cpumask_set_cpu(i, mask);
	atomic_set(&info.cpuleft, cpumask_weight(mask));
	if (atomic_read(&info.cpuleft) == 0)
		goto done;

	init_completion(&info.completion);

	local_bh_disable();
	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
	if (self)
		flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);

done:
	mutex_unlock(&net->xfrm.flow_flush_sem);
	put_online_cpus();
	free_cpumask_var(mask);
}

static void flow_cache_flush_task(struct work_struct *work)
{
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_flush_work);
	struct net *net = container_of(xfrm, struct net, xfrm);

	flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
	schedule_work(&net->xfrm.flow_cache_flush_work);
}

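/* Allocate a cpu's hash table on its own NUMA node the first time it
 * comes up; a table that already exists is reused across hotplug.
 */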
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	unsigned int sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %u\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);

	return flow_cache_cpu_prepare(fc, cpu);
}

static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	__flow_cache_shrink(fc, fcp, 0);
	return 0;
}

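/* Per-netns setup: create the shared slab cache on first use, size the
 * hash table and watermarks, allocate the per-cpu state, register with
 * cpu hotplug and start the seed-rotation timer.
 */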
int flow_cache_init(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	if (!flow_cachep)
		flow_cachep = kmem_cache_create("flow_cache",
						sizeof(struct flow_cache_entry),
						0, SLAB_PANIC, NULL);
	spin_lock_init(&net->xfrm.flow_cache_gc_lock);
	INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
	mutex_init(&net->xfrm.flow_flush_sem);
	atomic_set(&net->xfrm.flow_cache_gc_count, 0);

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
		goto err;

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

void flow_cache_fini(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	del_timer_sync(&fc->rnd_timer);

	cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);

	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);

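/* Boot-time setup of the multi-instance cpu hotplug state shared by
 * all per-netns flow caches.
 */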
void __init flow_cache_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
				      "net/flow:prepare",
				      flow_cache_cpu_up_prep,
				      flow_cache_cpu_dead);
	WARN_ON(ret < 0);
}