/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

struct flow_cache_entry {
        union {
                struct hlist_node        hlist;
                struct list_head         gc_list;
        } u;
        struct net                       *net;
        u16                              family;
        u8                               dir;
        u32                              genid;
        struct flowi                     key;
        struct flow_cache_object         *object;
};

struct flow_flush_info {
        struct flow_cache                *cache;
        atomic_t                         cpuleft;
        struct completion                completion;
};

static struct kmem_cache *flow_cachep __read_mostly;

#define flow_cache_hash_size(cache)     (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)

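/* Timer callback: mark every possible CPU's cache for a hash seed
 * recalculation and re-arm the timer for another FLOW_HASH_RND_PERIOD.
 * The actual reseed happens lazily in the per-cpu lookup path.
 */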
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

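/* An entry is stale once the namespace-wide flow_cache_genid has moved on
 * from the genid recorded in the entry, or once the cached object fails
 * its own ->check() callback.
 */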
static int flow_entry_valid(struct flow_cache_entry *fle,
                                struct netns_xfrm *xfrm)
{
        if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

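/* Release the cached object, if any, through its ->delete() op and return
 * the entry itself to the flow_cachep slab.
 */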
static void flow_entry_kill(struct flow_cache_entry *fle,
                                struct netns_xfrm *xfrm)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

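/* Workqueue handler: splice the pending garbage list out from under the
 * gc lock, then kill the collected entries with the lock dropped.
 */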
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                                flow_cache_gc_work);

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&xfrm->flow_cache_gc_lock);
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
                flow_entry_kill(fce, xfrm);
}

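/* Called with 'deleted' entries already unhashed and chained on 'gc_list':
 * fix up the per-cpu entry count, move the entries onto the namespace-wide
 * garbage list and kick the gc work item to free them.
 */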
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list,
                                     struct netns_xfrm *xfrm)
{
        if (deleted) {
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
                spin_unlock_bh(&xfrm->flow_cache_gc_lock);
                schedule_work(&xfrm->flow_cache_gc_work);
        }
}

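/* Walk one CPU's hash table, keeping at most 'shrink_to' still-valid
 * entries per bucket; everything else is unhashed and queued for garbage
 * collection.  A shrink_to of 0 therefore empties the table.
 */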
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle, xfrm)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

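/* Pick a fresh per-cpu hash seed and drop every existing entry: the old
 * entries were hashed with the previous seed and would land in the wrong
 * buckets if kept.
 */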
static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

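/* Hash the flow key (keysize is in flow_compare_t units) with the per-cpu
 * seed and mask the result down to the table size.
 */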
static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
                          size_t keysize)
{
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

        return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
                            size_t keysize)
{
        const flow_compare_t *k1, *k1_lim, *k2;

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + keysize;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

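/* Main lookup entry point.  Runs with BHs disabled against the local CPU's
 * hash table.  On a hit whose genid is still current the cached object is
 * returned through its ->get() op; on a miss (or a stale hit) the entry is
 * (re)allocated and @resolver is called to build a fresh object, which is
 * cached unless it is an ERR_PTR.
 *
 * A caller is expected to look roughly like the sketch below (illustrative
 * only; the real callers and their resolvers live in the xfrm code):
 *
 *      flo = flow_cache_lookup(net, &fl, family, dir, my_resolver, ctx);
 *      if (IS_ERR_OR_NULL(flo))
 *              ... fall back to resolving the flow without the cache ...
 */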
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &net->xfrm.flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
        size_t keysize;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;

        keysize = flow_key_size(family);
        if (!keysize)
                goto nocache;

        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (!IS_ERR_OR_NULL(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

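/* Per-cpu flush, run from this CPU's flush tasklet: unhash every entry that
 * is no longer valid (typically because the genid was bumped) and queue it
 * for garbage collection; signal the waiting flusher when the last CPU in
 * the flush set finishes.
 */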
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle, xfrm))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp;
        int i;

        fcp = per_cpu_ptr(fc->percpu, cpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++)
                if (!hlist_empty(&fcp->hash_table[i]))
                        return 0;
        return 1;
}

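/* smp_call_function callback: hand the flush off to this CPU's tasklet so
 * the hash table is only ever modified from its owning CPU in BH context.
 */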
static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;

        tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

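/* Synchronously flush the flow cache for a namespace.  Only CPUs that
 * actually hold entries are interrupted; the whole operation is serialized
 * by flow_flush_sem and protected against CPU hotplug.  Sleeps, so it must
 * be called from process context.
 */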
void flow_cache_flush(struct net *net)
{
        struct flow_flush_info info;
        cpumask_var_t mask;
        int i, self;

        /* Track which cpus need flushing to avoid disturbing all cores. */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;
        cpumask_clear(mask);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&net->xfrm.flow_flush_sem);
        info.cache = &net->xfrm.flow_cache_global;
        for_each_online_cpu(i)
                if (!flow_cache_percpu_empty(info.cache, i))
                        cpumask_set_cpu(i, mask);
        atomic_set(&info.cpuleft, cpumask_weight(mask));
        if (atomic_read(&info.cpuleft) == 0)
                goto done;

        init_completion(&info.completion);

        local_bh_disable();
        self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
        on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
        if (self)
                flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);

done:
        mutex_unlock(&net->xfrm.flow_flush_sem);
        put_online_cpus();
        free_cpumask_var(mask);
}

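/* Deferred flush: flow_cache_flush() sleeps, so callers in atomic context
 * queue the flush onto a work item instead and let it run later in process
 * context.
 */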
static void flow_cache_flush_task(struct work_struct *work)
{
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                                flow_cache_flush_work);
        struct net *net = container_of(xfrm, struct net, xfrm);

        flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
        schedule_work(&net->xfrm.flow_cache_flush_work);
}

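/* Allocate and initialize one CPU's hash table on first use, placing the
 * allocation on that CPU's NUMA node.
 */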
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

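/* CPU hotplug notifier: set up the hash table when a CPU is brought up and
 * drop all of its cached entries when it goes away.
 */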
static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
{
        struct flow_cache *fc = container_of(nfb, struct flow_cache,
                                                hotcpu_notifier);
        int res, cpu = (unsigned long) hcpu;
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                res = flow_cache_cpu_prepare(fc, cpu);
                if (res)
                        return notifier_from_errno(res);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                __flow_cache_shrink(fc, fcp, 0);
                break;
        }
        return NOTIFY_OK;
}

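/* Per-namespace initialization: create the shared entry slab on first use,
 * set up the gc and flush work items, pick the watermarks, allocate the
 * per-cpu state, register the hotplug notifier and start the hash seed
 * rotation timer.
 */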
int flow_cache_init(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        if (!flow_cachep)
                flow_cachep = kmem_cache_create("flow_cache",
                                                sizeof(struct flow_cache_entry),
                                                0, SLAB_PANIC, NULL);
        spin_lock_init(&net->xfrm.flow_cache_gc_lock);
        INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        cpu_notifier_register_begin();

        for_each_online_cpu(i) {
                if (flow_cache_cpu_prepare(fc, i))
                        goto err;
        }
        fc->hotcpu_notifier = (struct notifier_block){
                .notifier_call = flow_cache_cpu,
        };
        __register_hotcpu_notifier(&fc->hotcpu_notifier);

        cpu_notifier_register_done();

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;

err:
        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        cpu_notifier_register_done();

        free_percpu(fc->percpu);
        fc->percpu = NULL;

        return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);

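/* Per-namespace teardown: stop the seed rotation timer, unregister the
 * hotplug notifier and free all per-cpu hash tables and state.
 */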
void flow_cache_fini(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        del_timer_sync(&fc->rnd_timer);
        unregister_hotcpu_notifier(&fc->hotcpu_notifier);

        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);