/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

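/* Lookup key for the flower classifier.  Each filter stores the
 * user-supplied key together with a mask; the masked copy (mkey in
 * struct cls_fl_filter) is what gets hashed into the rhashtable.  The
 * struct is long-aligned so the mask-and-compare helpers below can walk
 * it in sizeof(long) steps.
 */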
struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head	rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	u32 flags;
	struct rcu_head	rcu;
};

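/* The mask rarely covers the whole of struct fl_flow_key.
 * fl_mask_update_range() records the first and last non-zero byte of the
 * mask, rounded out to long boundaries, so that building the masked key
 * and hashing/comparing it only touch the region the mask actually uses.
 */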
static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

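/* Fast path: build a flow key for the skb (including tunnel metadata when
 * present), mask it with the single mask shared by all filters on this
 * classifier instance, and look the result up in the rhashtable.  A miss,
 * or a hit on a filter marked skip_sw, falls through to the next
 * classifier (-1).
 */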
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;
	struct ip_tunnel_info *info;

	if (!atomic_read(&head->ht.nelems))
		return -1;

	flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
	fl_clear_masked_range(&skb_key, &head->mask);

	info = skb_tunnel_info(skb);
	if (info) {
		struct ip_tunnel_key *key = &info->key;

		switch (ip_tunnel_info_af(info)) {
		case AF_INET:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			skb_key.enc_ipv4.src = key->u.ipv4.src;
			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
			break;
		case AF_INET6:
			skb_key.enc_control.addr_type =
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			skb_key.enc_ipv6.src = key->u.ipv6.src;
			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
			break;
		}

		skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
	}

	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here instead.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f && !tc_skip_sw(f->flags)) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

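/* Hardware offload helpers.  Each one builds a struct tc_cls_flower_offload
 * command (REPLACE, DESTROY or STATS) and hands it to the device via
 * ndo_setup_tc(), provided tc_should_offload() says the device can take it
 * and the filter flags allow it.
 */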
static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = cookie;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

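/* When the filter is marked skip_sw, a failure to install it in hardware is
 * fatal (nothing would be left to match the flow), so the error from
 * ndo_setup_tc() is propagated.  Otherwise hardware errors are ignored and
 * the software path still handles the traffic.
 */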
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct flow_dissector *dissector,
				struct fl_flow_key *mask,
				struct fl_flow_key *key,
				struct tcf_exts *actions,
				unsigned long cookie, u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;
	int err;

	if (!tc_should_offload(dev, tp, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = cookie;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = key;
	offload.exts = actions;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    &tc);

	if (tc_skip_sw(flags))
		return err;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, tp, 0))
		return;

	offload.command = TC_CLSFLOWER_STATS;
	offload.cookie = (unsigned long)f;
	offload.exts = &f->exts;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

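/* Teardown happens in two steps: fl_destroy_rcu() runs after a grace period
 * but only schedules a work item, because rhashtable_destroy() may sleep and
 * so cannot be called from RCU callback (softirq) context.  The module
 * reference taken in fl_destroy() keeps this code around until the work
 * actually runs.
 */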
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		fl_hw_destroy_filter(tp, (unsigned long)f);
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}

	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
};

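/* Netlink helpers for building the key/mask pair.  A value attribute
 * without a corresponding mask attribute is treated as an exact match
 * (mask set to all ones); an absent value leaves both key and mask
 * untouched, i.e. the field stays wildcarded.
 */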
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static void fl_set_key_vlan(struct nlattr **tb,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
		key_val->vlan_id =
			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
		key_val->vlan_priority =
			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
	__be16 ethertype;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (ethertype == htons(ETH_P_8021Q)) {
			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

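/* fl_init_dissector() tells the flow dissector which key parts to extract.
 * The control and basic keys are always needed; every other key is added
 * only if its portion of the mask is non-zero, so the dissector does no
 * more work than the installed filters require.
 */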
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);

static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

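/* This version of the classifier supports a single mask per tcf_proto
 * instance: the first filter's mask seeds the hash table and the dissector,
 * and any later filter whose mask differs is rejected with -EINVAL.
 */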
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. Assign it and initialize the hashtable
	 * accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

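/* Handle allocation: hgen is a simple wrapping counter, and fl_get() is
 * used to skip values that are already taken.  After roughly 2^31 attempts
 * without finding a free handle the allocation gives up and returns 0.
 */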
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

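/* fl_change() implements both filter creation and replacement: parse the
 * netlink attributes into key and mask, make sure the mask matches the one
 * already assigned to this instance, insert the masked key into the
 * rhashtable (unless skip_sw) and offer the filter to the hardware.  On
 * replacement the old filter is unhashed, removed from hardware and freed
 * after an RCU grace period.
 */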
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	if (!tc_skip_sw(fnew->flags)) {
		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
					     head->ht_params);
		if (err)
			goto errout;
	}

	err = fl_hw_replace_filter(tp,
				   &head->dissector,
				   &mask.key,
				   &fnew->key,
				   &fnew->exts,
				   (unsigned long)fnew,
				   fnew->flags);
	if (err)
		goto errout;

	if (fold) {
		if (!tc_skip_sw(fold->flags))
			rhashtable_remove_fast(&head->ht, &fold->ht_node,
					       head->ht_params);
		fl_hw_destroy_filter(tp, (unsigned long)fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	if (!tc_skip_sw(f->flags))
		rhashtable_remove_fast(&head->ht, &f->ht_node,
				       head->ht_params);
	list_del_rcu(&f->list);
	fl_hw_destroy_filter(tp, (unsigned long)f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

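/* Dump helpers mirror fl_set_key(): a field whose mask is all zeroes was
 * never configured and is simply omitted from the dump.
 */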
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	fl_hw_update_stats(tp, f);

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			    sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				TCA_FLOWER_KEY_ENC_IPV6_DST,
				&mask->enc_ipv6.dst,
				TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)))
		goto nla_put_failure;

	nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");