/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

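/* Build the masked key: 'dst' = 'src' & 'mask->key', one long word at a
 * time, over the byte range covered by 'mask->range'.
 */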
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				       mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				       mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}

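/* Allocate a flow from 'flow_cache' and initialise its identifier, mask
 * and actions pointers.  Only the node-0 statistics block is allocated
 * here; the stats pointers for all other NUMA nodes start out NULL.
 */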
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.unmasked_key = NULL;
	flow->id.ufid_len = 0;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

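/* Allocate and initialise 'n_buckets' hash-list heads.  A flex_array is
 * used so the bucket array does not require one large physically
 * contiguous allocation.
 */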
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

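/* Free a table instance pair and, unless 'ti->keep_flows' is set, every
 * flow linked into it.  With 'deferred' the instances and flows are freed
 * after an RCU grace period, otherwise immediately.
 */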
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from an RCU callback or
 * from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

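/* Walk the flows in 'ti' for a flow dump.  '*bucket' and '*last' record
 * the current position so that successive calls resume where the previous
 * one stopped.
 */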
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

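/* Map 'hash' to a bucket, mixing in the per-instance seed so the bucket
 * distribution changes whenever the table instance is replaced.
 */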
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

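/* Relink every flow from 'old' into 'new'.  The new instance uses the
 * other node_ver, so each flow's unused hlist node is written while the
 * old instance stays intact for concurrent RCU readers.
 */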
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

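/* Drop every flow in 'flow_table' by installing fresh, empty table
 * instances and destroying the old ones (and their flows) after an RCU
 * grace period.
 */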
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

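/* Hash the u32 words of 'key' that fall within 'range'. */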
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.u.ipv4.dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

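/* Look up the flow matching 'unmasked' under a single 'mask': mask the
 * key, hash the masked range, and scan the resulting bucket comparing
 * only the bytes inside 'mask->range'.
 */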
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

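/* Packet-path lookup: try each mask in the table's mask list in turn
 * until a flow matches.  '*n_mask_hit' returns how many masks were tried
 * before a match, or the total number of masks on a miss.
 */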
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

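/* Look up a flow by its unique flow identifier (UFID) in the dedicated
 * UFID table instance.
 */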
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

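/* Grow a table instance by doubling its bucket count. */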
static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_node_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}