blob: 970db7a41684aa2a494b97663f91ca932308de05 [file] [log] [blame]
Jiri Pirko77b99002015-05-12 14:56:21 +02001/*
2 * net/sched/cls_flower.c Flower classifier
3 *
4 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/rhashtable.h>
Daniel Borkmannd9363772016-11-27 01:18:01 +010016#include <linux/workqueue.h>
Jiri Pirko77b99002015-05-12 14:56:21 +020017
18#include <linux/if_ether.h>
19#include <linux/in6.h>
20#include <linux/ip.h>
21
22#include <net/sch_generic.h>
23#include <net/pkt_cls.h>
24#include <net/ip.h>
25#include <net/flow_dissector.h>
26
Amir Vadaibc3103f2016-09-08 16:23:47 +030027#include <net/dst.h>
28#include <net/dst_metadata.h>
29
/* Aggregate of every flow-dissector key the flower classifier can match on.
 * Each filter stores a key and shares one mask of this type; lookups compare
 * only the masked byte range, word-at-a-time, hence the alignment below.
 */
struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;	/* tunnel (encap) metadata */
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {						/* v4/v6 are mutually exclusive */
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_keyid enc_key_id;	/* tunnel key id (e.g. VNI) */
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;		/* tunnel UDP ports */
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
50
/* Byte span [start, end) of struct fl_flow_key that the mask actually
 * covers; bytes outside it are known to be zero in the mask.
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
55
/* A match mask plus the trimmed byte range it spans (see fl_mask_update_range). */
struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head rcu;
};
61
/* Per-tcf_proto state of the flower classifier. */
struct cls_fl_head {
	struct rhashtable ht;		/* masked-key -> filter lookup table */
	struct fl_flow_mask mask;	/* the single mask shared by all filters */
	struct flow_dissector dissector;
	u32 hgen;			/* last auto-generated filter handle */
	bool mask_assigned;		/* true once 'mask' and 'ht' are set up */
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {				/* rcu first, then work: see fl_destroy_rcu() */
		struct work_struct work;
		struct rcu_head rcu;
	};
};
75
/* One flower filter: the user-supplied key, its pre-masked copy used as the
 * rhashtable key, and the hardware-offload bookkeeping.
 */
struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;	/* key & mask, precomputed for lookup */
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;		/* key as supplied by the user */
	struct list_head list;
	u32 handle;
	u32 flags;			/* TCA_CLS_FLAGS_SKIP_{HW,SW} etc. */
	struct rcu_head rcu;
	struct tc_to_netdev tc;		/* scratch area for ndo_setup_tc calls */
	struct net_device *hw_dev;	/* device the filter was offloaded to */
};
89
90static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
91{
92 return mask->range.end - mask->range.start;
93}
94
95static void fl_mask_update_range(struct fl_flow_mask *mask)
96{
97 const u8 *bytes = (const u8 *) &mask->key;
98 size_t size = sizeof(mask->key);
99 size_t i, first = 0, last = size - 1;
100
101 for (i = 0; i < sizeof(mask->key); i++) {
102 if (bytes[i]) {
103 if (!first && i)
104 first = i;
105 last = i;
106 }
107 }
108 mask->range.start = rounddown(first, sizeof(long));
109 mask->range.end = roundup(last + 1, sizeof(long));
110}
111
112static void *fl_key_get_start(struct fl_flow_key *key,
113 const struct fl_flow_mask *mask)
114{
115 return (u8 *) key + mask->range.start;
116}
117
118static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
119 struct fl_flow_mask *mask)
120{
121 const long *lkey = fl_key_get_start(key, mask);
122 const long *lmask = fl_key_get_start(&mask->key, mask);
123 long *lmkey = fl_key_get_start(mkey, mask);
124 int i;
125
126 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
127 *lmkey++ = *lkey++ & *lmask++;
128}
129
/* Zero the masked portion of @key so unmatched fields compare equal. */
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	void *start = fl_key_get_start(key, mask);

	memset(start, 0, fl_mask_range(mask));
}
135
/* Classifier fast path: dissect the skb into a flow key, mask it, and look
 * the masked key up in the hash table.  Returns the action verdict, or -1
 * when nothing matches (or the match is hardware-only).
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	/* No filters installed -> nothing can match. */
	if (!atomic_read(&head->ht.nelems))
		return -1;

	/* Only the masked range is compared, so only it needs clearing. */
	fl_clear_masked_range(&skb_key, &head->mask);

	/* Populate tunnel (encap) fields from the skb's tunnel metadata,
	 * if any, before dissecting the inner packet.
	 */
	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
		skb_key.enc_tp.src = key->tp_src;
		skb_key.enc_tp.dst = key->tp_dst;
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	/* Skip filters marked hardware-only; they must not match in sw. */
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
192
193static int fl_init(struct tcf_proto *tp)
194{
195 struct cls_fl_head *head;
196
197 head = kzalloc(sizeof(*head), GFP_KERNEL);
198 if (!head)
199 return -ENOBUFS;
200
201 INIT_LIST_HEAD_RCU(&head->filters);
202 rcu_assign_pointer(tp->root, head);
203
204 return 0;
205}
206
207static void fl_destroy_filter(struct rcu_head *head)
208{
209 struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
210
211 tcf_exts_destroy(&f->exts);
212 kfree(f);
213}
214
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200215static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
Amir Vadai5b33f482016-03-08 12:42:29 +0200216{
Amir Vadai5b33f482016-03-08 12:42:29 +0200217 struct tc_cls_flower_offload offload = {0};
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200218 struct net_device *dev = f->hw_dev;
219 struct tc_to_netdev *tc = &f->tc;
Amir Vadai5b33f482016-03-08 12:42:29 +0200220
Hadar Hen Zion79685212016-12-01 14:06:34 +0200221 if (!tc_can_offload(dev, tp))
Amir Vadai5b33f482016-03-08 12:42:29 +0200222 return;
223
224 offload.command = TC_CLSFLOWER_DESTROY;
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200225 offload.cookie = (unsigned long)f;
Amir Vadai5b33f482016-03-08 12:42:29 +0200226
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200227 tc->type = TC_SETUP_CLSFLOWER;
228 tc->cls_flower = &offload;
Amir Vadai5b33f482016-03-08 12:42:29 +0200229
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200230 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
Amir Vadai5b33f482016-03-08 12:42:29 +0200231}
232
Amir Vadaie8eb36c2016-06-13 12:06:39 +0300233static int fl_hw_replace_filter(struct tcf_proto *tp,
234 struct flow_dissector *dissector,
235 struct fl_flow_key *mask,
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200236 struct cls_fl_filter *f)
Amir Vadai5b33f482016-03-08 12:42:29 +0200237{
238 struct net_device *dev = tp->q->dev_queue->dev;
239 struct tc_cls_flower_offload offload = {0};
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200240 struct tc_to_netdev *tc = &f->tc;
Amir Vadaie8eb36c2016-06-13 12:06:39 +0300241 int err;
Amir Vadai5b33f482016-03-08 12:42:29 +0200242
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200243 if (!tc_can_offload(dev, tp)) {
Hadar Hen Ziona6e16932016-12-04 15:25:19 +0200244 if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
245 (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
246 f->hw_dev = dev;
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200247 return tc_skip_sw(f->flags) ? -EINVAL : 0;
Hadar Hen Ziona6e16932016-12-04 15:25:19 +0200248 }
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200249 dev = f->hw_dev;
250 tc->egress_dev = true;
251 } else {
252 f->hw_dev = dev;
253 }
Amir Vadai5b33f482016-03-08 12:42:29 +0200254
255 offload.command = TC_CLSFLOWER_REPLACE;
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200256 offload.cookie = (unsigned long)f;
Amir Vadai5b33f482016-03-08 12:42:29 +0200257 offload.dissector = dissector;
258 offload.mask = mask;
Paul Blakeyf93bd172016-12-14 19:00:58 +0200259 offload.key = &f->mkey;
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200260 offload.exts = &f->exts;
Amir Vadai5b33f482016-03-08 12:42:29 +0200261
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200262 tc->type = TC_SETUP_CLSFLOWER;
263 tc->cls_flower = &offload;
Amir Vadai5b33f482016-03-08 12:42:29 +0200264
Jamal Hadi Salim5a7a5552016-09-18 08:45:33 -0400265 err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200266 tc);
Amir Vadaie8eb36c2016-06-13 12:06:39 +0300267
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200268 if (tc_skip_sw(f->flags))
Amir Vadaie8eb36c2016-06-13 12:06:39 +0300269 return err;
Amir Vadaie8eb36c2016-06-13 12:06:39 +0300270 return 0;
Amir Vadai5b33f482016-03-08 12:42:29 +0200271}
272
Amir Vadai10cbc682016-05-13 12:55:37 +0000273static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
274{
Amir Vadai10cbc682016-05-13 12:55:37 +0000275 struct tc_cls_flower_offload offload = {0};
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200276 struct net_device *dev = f->hw_dev;
277 struct tc_to_netdev *tc = &f->tc;
Amir Vadai10cbc682016-05-13 12:55:37 +0000278
Hadar Hen Zion79685212016-12-01 14:06:34 +0200279 if (!tc_can_offload(dev, tp))
Amir Vadai10cbc682016-05-13 12:55:37 +0000280 return;
281
282 offload.command = TC_CLSFLOWER_STATS;
283 offload.cookie = (unsigned long)f;
284 offload.exts = &f->exts;
285
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200286 tc->type = TC_SETUP_CLSFLOWER;
287 tc->cls_flower = &offload;
Amir Vadai10cbc682016-05-13 12:55:37 +0000288
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200289 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc);
Amir Vadai10cbc682016-05-13 12:55:37 +0000290}
291
Roi Dayan13fa8762016-11-01 16:08:29 +0200292static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
293{
294 list_del_rcu(&f->list);
Hadar Hen Zion79685212016-12-01 14:06:34 +0200295 if (!tc_skip_hw(f->flags))
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200296 fl_hw_destroy_filter(tp, f);
Roi Dayan13fa8762016-11-01 16:08:29 +0200297 tcf_unbind_filter(tp, &f->res);
298 call_rcu(&f->rcu, fl_destroy_filter);
299}
300
Daniel Borkmannd9363772016-11-27 01:18:01 +0100301static void fl_destroy_sleepable(struct work_struct *work)
302{
303 struct cls_fl_head *head = container_of(work, struct cls_fl_head,
304 work);
305 if (head->mask_assigned)
306 rhashtable_destroy(&head->ht);
307 kfree(head);
308 module_put(THIS_MODULE);
309}
310
/* RCU half of head destruction: after the grace period, bounce the final
 * (sleepable) teardown to a workqueue.  'work' and 'rcu' share a union, so
 * the rcu_head may be overwritten here - it is no longer needed.
 */
static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}
318
Jiri Pirko77b99002015-05-12 14:56:21 +0200319static bool fl_destroy(struct tcf_proto *tp, bool force)
320{
321 struct cls_fl_head *head = rtnl_dereference(tp->root);
322 struct cls_fl_filter *f, *next;
323
324 if (!force && !list_empty(&head->filters))
325 return false;
326
Roi Dayan13fa8762016-11-01 16:08:29 +0200327 list_for_each_entry_safe(f, next, &head->filters, list)
328 __fl_delete(tp, f);
Daniel Borkmannd9363772016-11-27 01:18:01 +0100329
330 __module_get(THIS_MODULE);
331 call_rcu(&head->rcu, fl_destroy_rcu);
David S. Miller27455292016-12-03 11:46:54 -0500332
Jiri Pirko77b99002015-05-12 14:56:21 +0200333 return true;
334}
335
336static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
337{
338 struct cls_fl_head *head = rtnl_dereference(tp->root);
339 struct cls_fl_filter *f;
340
341 list_for_each_entry(f, &head->filters, list)
342 if (f->handle == handle)
343 return (unsigned long) f;
344 return 0;
345}
346
/* Netlink attribute policy for TCA_FLOWER_* options: expected type (or
 * exact payload length) of every attribute accepted by fl_change().
 */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
};
405
406static void fl_set_key_val(struct nlattr **tb,
407 void *val, int val_type,
408 void *mask, int mask_type, int len)
409{
410 if (!tb[val_type])
411 return;
412 memcpy(val, nla_data(tb[val_type]), len);
413 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
414 memset(mask, 0xff, len);
415 else
416 memcpy(mask, nla_data(tb[mask_type]), len);
417}
418
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300419static void fl_set_key_vlan(struct nlattr **tb,
420 struct flow_dissector_key_vlan *key_val,
421 struct flow_dissector_key_vlan *key_mask)
422{
423#define VLAN_PRIORITY_MASK 0x7
424
425 if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
426 key_val->vlan_id =
427 nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
428 key_mask->vlan_id = VLAN_VID_MASK;
429 }
430 if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
431 key_val->vlan_priority =
432 nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
433 VLAN_PRIORITY_MASK;
434 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
435 }
436}
437
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200438static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
439 u32 *dissector_key, u32 *dissector_mask,
440 u32 flower_flag_bit, u32 dissector_flag_bit)
441{
442 if (flower_mask & flower_flag_bit) {
443 *dissector_mask |= dissector_flag_bit;
444 if (flower_key & flower_flag_bit)
445 *dissector_key |= dissector_flag_bit;
446 }
447}
448
Or Gerlitzd9724772016-12-22 14:28:15 +0200449static int fl_set_key_flags(struct nlattr **tb,
450 u32 *flags_key, u32 *flags_mask)
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200451{
452 u32 key, mask;
453
Or Gerlitzd9724772016-12-22 14:28:15 +0200454 /* mask is mandatory for flags */
455 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
456 return -EINVAL;
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200457
458 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
Or Gerlitzd9724772016-12-22 14:28:15 +0200459 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200460
461 *flags_key = 0;
462 *flags_mask = 0;
463
464 fl_set_key_flag(key, mask, flags_key, flags_mask,
465 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
Or Gerlitzd9724772016-12-22 14:28:15 +0200466
467 return 0;
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200468}
469
Jiri Pirko77b99002015-05-12 14:56:21 +0200470static int fl_set_key(struct net *net, struct nlattr **tb,
471 struct fl_flow_key *key, struct fl_flow_key *mask)
472{
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300473 __be16 ethertype;
Or Gerlitzd9724772016-12-22 14:28:15 +0200474 int ret = 0;
Brian Haleydd3aa3b2015-05-14 13:20:15 -0400475#ifdef CONFIG_NET_CLS_IND
Jiri Pirko77b99002015-05-12 14:56:21 +0200476 if (tb[TCA_FLOWER_INDEV]) {
Brian Haleydd3aa3b2015-05-14 13:20:15 -0400477 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
Jiri Pirko77b99002015-05-12 14:56:21 +0200478 if (err < 0)
479 return err;
480 key->indev_ifindex = err;
481 mask->indev_ifindex = 0xffffffff;
482 }
Brian Haleydd3aa3b2015-05-14 13:20:15 -0400483#endif
Jiri Pirko77b99002015-05-12 14:56:21 +0200484
485 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
486 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
487 sizeof(key->eth.dst));
488 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
489 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
490 sizeof(key->eth.src));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -0500491
Arnd Bergmann0b498a52016-08-26 17:25:45 +0200492 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300493 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
494
Arnd Bergmann0b498a52016-08-26 17:25:45 +0200495 if (ethertype == htons(ETH_P_8021Q)) {
496 fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
497 fl_set_key_val(tb, &key->basic.n_proto,
498 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
499 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
500 sizeof(key->basic.n_proto));
501 } else {
502 key->basic.n_proto = ethertype;
503 mask->basic.n_proto = cpu_to_be16(~0);
504 }
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300505 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -0500506
Jiri Pirko77b99002015-05-12 14:56:21 +0200507 if (key->basic.n_proto == htons(ETH_P_IP) ||
508 key->basic.n_proto == htons(ETH_P_IPV6)) {
509 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
510 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
511 sizeof(key->basic.ip_proto));
512 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -0500513
514 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
515 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +0200516 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +0200517 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
518 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
519 sizeof(key->ipv4.src));
520 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
521 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
522 sizeof(key->ipv4.dst));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -0500523 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
524 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +0200525 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +0200526 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
527 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
528 sizeof(key->ipv6.src));
529 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
530 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
531 sizeof(key->ipv6.dst));
532 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -0500533
Jiri Pirko77b99002015-05-12 14:56:21 +0200534 if (key->basic.ip_proto == IPPROTO_TCP) {
535 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +0300536 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +0200537 sizeof(key->tp.src));
538 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +0300539 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +0200540 sizeof(key->tp.dst));
541 } else if (key->basic.ip_proto == IPPROTO_UDP) {
542 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +0300543 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +0200544 sizeof(key->tp.src));
545 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +0300546 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +0200547 sizeof(key->tp.dst));
Simon Horman5976c5f2016-11-03 13:24:21 +0100548 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
549 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
550 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
551 sizeof(key->tp.src));
552 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
553 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
554 sizeof(key->tp.dst));
Simon Horman7b684882016-12-07 13:48:28 +0100555 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
556 key->basic.ip_proto == IPPROTO_ICMP) {
557 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
558 &mask->icmp.type,
559 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
560 sizeof(key->icmp.type));
561 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
562 &mask->icmp.code,
563 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
564 sizeof(key->icmp.code));
565 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
566 key->basic.ip_proto == IPPROTO_ICMPV6) {
567 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
568 &mask->icmp.type,
569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
570 sizeof(key->icmp.type));
571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
572 &mask->icmp.code,
573 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
574 sizeof(key->icmp.code));
Jiri Pirko77b99002015-05-12 14:56:21 +0200575 }
576
Amir Vadaibc3103f2016-09-08 16:23:47 +0300577 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
578 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
579 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +0200580 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +0300581 fl_set_key_val(tb, &key->enc_ipv4.src,
582 TCA_FLOWER_KEY_ENC_IPV4_SRC,
583 &mask->enc_ipv4.src,
584 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
585 sizeof(key->enc_ipv4.src));
586 fl_set_key_val(tb, &key->enc_ipv4.dst,
587 TCA_FLOWER_KEY_ENC_IPV4_DST,
588 &mask->enc_ipv4.dst,
589 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
590 sizeof(key->enc_ipv4.dst));
591 }
592
593 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
594 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
595 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +0200596 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +0300597 fl_set_key_val(tb, &key->enc_ipv6.src,
598 TCA_FLOWER_KEY_ENC_IPV6_SRC,
599 &mask->enc_ipv6.src,
600 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
601 sizeof(key->enc_ipv6.src));
602 fl_set_key_val(tb, &key->enc_ipv6.dst,
603 TCA_FLOWER_KEY_ENC_IPV6_DST,
604 &mask->enc_ipv6.dst,
605 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
606 sizeof(key->enc_ipv6.dst));
607 }
608
609 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +0300610 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
Amir Vadaibc3103f2016-09-08 16:23:47 +0300611 sizeof(key->enc_key_id.keyid));
612
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +0200613 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
614 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
615 sizeof(key->enc_tp.src));
616
617 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
618 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
619 sizeof(key->enc_tp.dst));
620
Or Gerlitzd9724772016-12-22 14:28:15 +0200621 if (tb[TCA_FLOWER_KEY_FLAGS])
622 ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200623
Or Gerlitzd9724772016-12-22 14:28:15 +0200624 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +0200625}
626
627static bool fl_mask_eq(struct fl_flow_mask *mask1,
628 struct fl_flow_mask *mask2)
629{
630 const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
631 const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
632
633 return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
634 !memcmp(lmask1, lmask2, fl_mask_range(mask1));
635}
636
/* Template rhashtable parameters; key_len and the final key_offset are
 * filled in per-mask by fl_init_hashtable().
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
642
/* Initialize the lookup table so it hashes exactly the masked byte range
 * of each filter's pre-masked key (mkey).
 */
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}
652
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

/* True iff any byte of @member is non-zero in the user-supplied @mask. */
#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))

/* Append a dissector key descriptor for @member to the keys array.
 * Fix: the do-while(0) macros previously ended in a semicolon, which
 * double-terminates statements and breaks use in if/else; call sites
 * supply their own ';'.
 */
#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while (0)

/* As FL_KEY_SET(), but only when the mask selects @member at all. */
#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while (0)
672
/* Build the flow dissector used on the fast path: always dissect control
 * and basic, plus every key group the mask actually selects.
 */
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	/* enc_control is filled by fl_classify() whenever either encap
	 * address family is matched on, so dissect it in that case too.
	 */
	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}
708
709static int fl_check_assign_mask(struct cls_fl_head *head,
710 struct fl_flow_mask *mask)
711{
712 int err;
713
714 if (head->mask_assigned) {
715 if (!fl_mask_eq(&head->mask, mask))
716 return -EINVAL;
717 else
718 return 0;
719 }
720
721 /* Mask is not assigned yet. So assign it and init hashtable
722 * according to that.
723 */
724 err = fl_init_hashtable(head, mask);
725 if (err)
726 return err;
727 memcpy(&head->mask, mask, sizeof(head->mask));
728 head->mask_assigned = true;
729
730 fl_init_dissector(head, mask);
731
732 return 0;
733}
734
/* Validate and apply the netlink parameters for a new/changed filter:
 * actions, optional classid binding, and the match key/mask.  On failure
 * the temporary exts are destroyed and the error is returned.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	/* Trim the mask range, then precompute the masked lookup key. */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
769
770static u32 fl_grab_new_handle(struct tcf_proto *tp,
771 struct cls_fl_head *head)
772{
773 unsigned int i = 0x80000000;
774 u32 handle;
775
776 do {
777 if (++head->hgen == 0x7FFFFFFF)
778 head->hgen = 1;
779 } while (--i > 0 && fl_get(tp, head->hgen));
780
781 if (unlikely(i == 0)) {
782 pr_err("Insufficient number of handles\n");
783 handle = 0;
784 } else {
785 handle = head->hgen;
786 }
787
788 return handle;
789}
790
791static int fl_change(struct net *net, struct sk_buff *in_skb,
792 struct tcf_proto *tp, unsigned long base,
793 u32 handle, struct nlattr **tca,
794 unsigned long *arg, bool ovr)
795{
796 struct cls_fl_head *head = rtnl_dereference(tp->root);
797 struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
798 struct cls_fl_filter *fnew;
799 struct nlattr *tb[TCA_FLOWER_MAX + 1];
800 struct fl_flow_mask mask = {};
801 int err;
802
803 if (!tca[TCA_OPTIONS])
804 return -EINVAL;
805
806 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
807 if (err < 0)
808 return err;
809
810 if (fold && handle && fold->handle != handle)
811 return -EINVAL;
812
813 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
814 if (!fnew)
815 return -ENOBUFS;
816
WANG Congb9a24bb2016-08-19 12:36:54 -0700817 err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
818 if (err < 0)
819 goto errout;
Jiri Pirko77b99002015-05-12 14:56:21 +0200820
821 if (!handle) {
822 handle = fl_grab_new_handle(tp, head);
823 if (!handle) {
824 err = -EINVAL;
825 goto errout;
826 }
827 }
828 fnew->handle = handle;
829
Amir Vadaie69985c2016-06-05 17:11:18 +0300830 if (tb[TCA_FLOWER_FLAGS]) {
831 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
832
833 if (!tc_flags_valid(fnew->flags)) {
834 err = -EINVAL;
835 goto errout;
836 }
837 }
Amir Vadai5b33f482016-03-08 12:42:29 +0200838
Jiri Pirko77b99002015-05-12 14:56:21 +0200839 err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
840 if (err)
841 goto errout;
842
843 err = fl_check_assign_mask(head, &mask);
844 if (err)
845 goto errout;
846
Amir Vadaie8eb36c2016-06-13 12:06:39 +0300847 if (!tc_skip_sw(fnew->flags)) {
Amir Vadaie69985c2016-06-05 17:11:18 +0300848 err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
849 head->ht_params);
850 if (err)
851 goto errout;
852 }
Amir Vadai5b33f482016-03-08 12:42:29 +0200853
Hadar Hen Zion79685212016-12-01 14:06:34 +0200854 if (!tc_skip_hw(fnew->flags)) {
855 err = fl_hw_replace_filter(tp,
856 &head->dissector,
857 &mask.key,
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200858 fnew);
Hadar Hen Zion79685212016-12-01 14:06:34 +0200859 if (err)
860 goto errout;
861 }
Amir Vadai5b33f482016-03-08 12:42:29 +0200862
863 if (fold) {
Jiri Pirko725cbb622016-11-28 15:40:13 +0100864 if (!tc_skip_sw(fold->flags))
865 rhashtable_remove_fast(&head->ht, &fold->ht_node,
866 head->ht_params);
Hadar Hen Zion79685212016-12-01 14:06:34 +0200867 if (!tc_skip_hw(fold->flags))
Hadar Hen Zion3036dab2016-12-01 14:06:35 +0200868 fl_hw_destroy_filter(tp, fold);
Amir Vadai5b33f482016-03-08 12:42:29 +0200869 }
Jiri Pirko77b99002015-05-12 14:56:21 +0200870
871 *arg = (unsigned long) fnew;
872
873 if (fold) {
Daniel Borkmannff3532f2015-07-17 22:38:44 +0200874 list_replace_rcu(&fold->list, &fnew->list);
Jiri Pirko77b99002015-05-12 14:56:21 +0200875 tcf_unbind_filter(tp, &fold->res);
876 call_rcu(&fold->rcu, fl_destroy_filter);
877 } else {
878 list_add_tail_rcu(&fnew->list, &head->filters);
879 }
880
881 return 0;
882
883errout:
WANG Congb9a24bb2016-08-19 12:36:54 -0700884 tcf_exts_destroy(&fnew->exts);
Jiri Pirko77b99002015-05-12 14:56:21 +0200885 kfree(fnew);
886 return err;
887}
888
889static int fl_delete(struct tcf_proto *tp, unsigned long arg)
890{
891 struct cls_fl_head *head = rtnl_dereference(tp->root);
892 struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
893
Jiri Pirko725cbb622016-11-28 15:40:13 +0100894 if (!tc_skip_sw(f->flags))
895 rhashtable_remove_fast(&head->ht, &f->ht_node,
896 head->ht_params);
Roi Dayan13fa8762016-11-01 16:08:29 +0200897 __fl_delete(tp, f);
Jiri Pirko77b99002015-05-12 14:56:21 +0200898 return 0;
899}
900
901static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
902{
903 struct cls_fl_head *head = rtnl_dereference(tp->root);
904 struct cls_fl_filter *f;
905
906 list_for_each_entry_rcu(f, &head->filters, list) {
907 if (arg->count < arg->skip)
908 goto skip;
909 if (arg->fn(tp, (unsigned long) f, arg) < 0) {
910 arg->stop = 1;
911 break;
912 }
913skip:
914 arg->count++;
915 }
916}
917
918static int fl_dump_key_val(struct sk_buff *skb,
919 void *val, int val_type,
920 void *mask, int mask_type, int len)
921{
922 int err;
923
924 if (!memchr_inv(mask, 0, len))
925 return 0;
926 err = nla_put(skb, val_type, len, val);
927 if (err)
928 return err;
929 if (mask_type != TCA_FLOWER_UNSPEC) {
930 err = nla_put(skb, mask_type, len, mask);
931 if (err)
932 return err;
933 }
934 return 0;
935}
936
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300937static int fl_dump_key_vlan(struct sk_buff *skb,
938 struct flow_dissector_key_vlan *vlan_key,
939 struct flow_dissector_key_vlan *vlan_mask)
940{
941 int err;
942
943 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
944 return 0;
945 if (vlan_mask->vlan_id) {
946 err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
947 vlan_key->vlan_id);
948 if (err)
949 return err;
950 }
951 if (vlan_mask->vlan_priority) {
952 err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
953 vlan_key->vlan_priority);
954 if (err)
955 return err;
956 }
957 return 0;
958}
959
/* Translate one flow-dissector flag bit into the corresponding flower
 * uapi flag bit.  When the dissector mask bit is set, the flower mask bit
 * is set too, and the flower key bit mirrors the dissector key bit.
 * When the dissector mask bit is clear, nothing is touched.
 */
static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (!(dissector_mask & dissector_flag_bit))
		return;

	*flower_mask |= flower_flag_bit;
	if (dissector_key & dissector_flag_bit)
		*flower_key |= flower_flag_bit;
}
970
971static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
972{
973 u32 key, mask;
974 __be32 _key, _mask;
975 int err;
976
977 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
978 return 0;
979
980 key = 0;
981 mask = 0;
982
983 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
984 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
985
986 _key = cpu_to_be32(key);
987 _mask = cpu_to_be32(mask);
988
989 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
990 if (err)
991 return err;
992
993 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
994}
995
Jiri Pirko77b99002015-05-12 14:56:21 +0200996static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
997 struct sk_buff *skb, struct tcmsg *t)
998{
999 struct cls_fl_head *head = rtnl_dereference(tp->root);
1000 struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
1001 struct nlattr *nest;
1002 struct fl_flow_key *key, *mask;
1003
1004 if (!f)
1005 return skb->len;
1006
1007 t->tcm_handle = f->handle;
1008
1009 nest = nla_nest_start(skb, TCA_OPTIONS);
1010 if (!nest)
1011 goto nla_put_failure;
1012
1013 if (f->res.classid &&
1014 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
1015 goto nla_put_failure;
1016
1017 key = &f->key;
1018 mask = &head->mask.key;
1019
1020 if (mask->indev_ifindex) {
1021 struct net_device *dev;
1022
1023 dev = __dev_get_by_index(net, key->indev_ifindex);
1024 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
1025 goto nla_put_failure;
1026 }
1027
Hadar Hen Zion79685212016-12-01 14:06:34 +02001028 if (!tc_skip_hw(f->flags))
1029 fl_hw_update_stats(tp, f);
Amir Vadai10cbc682016-05-13 12:55:37 +00001030
Jiri Pirko77b99002015-05-12 14:56:21 +02001031 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1032 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1033 sizeof(key->eth.dst)) ||
1034 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1035 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1036 sizeof(key->eth.src)) ||
1037 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
1038 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
1039 sizeof(key->basic.n_proto)))
1040 goto nla_put_failure;
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001041
1042 if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
1043 goto nla_put_failure;
1044
Jiri Pirko77b99002015-05-12 14:56:21 +02001045 if ((key->basic.n_proto == htons(ETH_P_IP) ||
1046 key->basic.n_proto == htons(ETH_P_IPV6)) &&
1047 fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1048 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1049 sizeof(key->basic.ip_proto)))
1050 goto nla_put_failure;
1051
Tom Herbertc3f83242015-06-04 09:16:40 -07001052 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02001053 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1054 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1055 sizeof(key->ipv4.src)) ||
1056 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1057 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1058 sizeof(key->ipv4.dst))))
1059 goto nla_put_failure;
Tom Herbertc3f83242015-06-04 09:16:40 -07001060 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02001061 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1062 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1063 sizeof(key->ipv6.src)) ||
1064 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1065 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1066 sizeof(key->ipv6.dst))))
1067 goto nla_put_failure;
1068
1069 if (key->basic.ip_proto == IPPROTO_TCP &&
1070 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001071 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001072 sizeof(key->tp.src)) ||
1073 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001074 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001075 sizeof(key->tp.dst))))
1076 goto nla_put_failure;
1077 else if (key->basic.ip_proto == IPPROTO_UDP &&
1078 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001079 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001080 sizeof(key->tp.src)) ||
1081 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001082 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001083 sizeof(key->tp.dst))))
1084 goto nla_put_failure;
Simon Horman5976c5f2016-11-03 13:24:21 +01001085 else if (key->basic.ip_proto == IPPROTO_SCTP &&
1086 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1087 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1088 sizeof(key->tp.src)) ||
1089 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1090 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1091 sizeof(key->tp.dst))))
1092 goto nla_put_failure;
Simon Horman7b684882016-12-07 13:48:28 +01001093 else if (key->basic.n_proto == htons(ETH_P_IP) &&
1094 key->basic.ip_proto == IPPROTO_ICMP &&
1095 (fl_dump_key_val(skb, &key->icmp.type,
1096 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
1097 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1098 sizeof(key->icmp.type)) ||
1099 fl_dump_key_val(skb, &key->icmp.code,
1100 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
1101 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1102 sizeof(key->icmp.code))))
1103 goto nla_put_failure;
1104 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1105 key->basic.ip_proto == IPPROTO_ICMPV6 &&
1106 (fl_dump_key_val(skb, &key->icmp.type,
1107 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
1108 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1109 sizeof(key->icmp.type)) ||
1110 fl_dump_key_val(skb, &key->icmp.code,
1111 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
1112 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1113 sizeof(key->icmp.code))))
1114 goto nla_put_failure;
Jiri Pirko77b99002015-05-12 14:56:21 +02001115
Amir Vadaibc3103f2016-09-08 16:23:47 +03001116 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
1117 (fl_dump_key_val(skb, &key->enc_ipv4.src,
1118 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
1119 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1120 sizeof(key->enc_ipv4.src)) ||
1121 fl_dump_key_val(skb, &key->enc_ipv4.dst,
1122 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
1123 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1124 sizeof(key->enc_ipv4.dst))))
1125 goto nla_put_failure;
1126 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
1127 (fl_dump_key_val(skb, &key->enc_ipv6.src,
1128 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
1129 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1130 sizeof(key->enc_ipv6.src)) ||
1131 fl_dump_key_val(skb, &key->enc_ipv6.dst,
1132 TCA_FLOWER_KEY_ENC_IPV6_DST,
1133 &mask->enc_ipv6.dst,
1134 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1135 sizeof(key->enc_ipv6.dst))))
1136 goto nla_put_failure;
1137
1138 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03001139 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001140 sizeof(key->enc_key_id)) ||
1141 fl_dump_key_val(skb, &key->enc_tp.src,
1142 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1143 &mask->enc_tp.src,
1144 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1145 sizeof(key->enc_tp.src)) ||
1146 fl_dump_key_val(skb, &key->enc_tp.dst,
1147 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1148 &mask->enc_tp.dst,
1149 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1150 sizeof(key->enc_tp.dst)))
Amir Vadaibc3103f2016-09-08 16:23:47 +03001151 goto nla_put_failure;
1152
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001153 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
1154 goto nla_put_failure;
1155
Amir Vadaie69985c2016-06-05 17:11:18 +03001156 nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
1157
Jiri Pirko77b99002015-05-12 14:56:21 +02001158 if (tcf_exts_dump(skb, &f->exts))
1159 goto nla_put_failure;
1160
1161 nla_nest_end(skb, nest);
1162
1163 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
1164 goto nla_put_failure;
1165
1166 return skb->len;
1167
1168nla_put_failure:
1169 nla_nest_cancel(skb, nest);
1170 return -1;
1171}
1172
/* Classifier operations exported to the TC core; "flower" is the kind
 * string userspace selects with "tc filter ... flower".  The referenced
 * fl_* callbacks are defined earlier in this file.
 */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
1185
/* Module init: register the "flower" classifier with the TC core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}
1190
/* Module exit: unregister the classifier from the TC core. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}
1195
/* Standard kernel module plumbing. */
module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");