blob: 71312d7bd8f490c9b8200ccaac59ea0cd0031da6 [file] [log] [blame]
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/rhashtable.h>
Daniel Borkmannd9363772016-11-27 01:18:01 +010016#include <linux/workqueue.h>
Jiri Pirko77b99002015-05-12 14:56:21 +020017
18#include <linux/if_ether.h>
19#include <linux/in6.h>
20#include <linux/ip.h>
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -040021#include <linux/mpls.h>
Jiri Pirko77b99002015-05-12 14:56:21 +020022
23#include <net/sch_generic.h>
24#include <net/pkt_cls.h>
25#include <net/ip.h>
26#include <net/flow_dissector.h>
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +020027#include <net/geneve.h>
Jiri Pirko77b99002015-05-12 14:56:21 +020028
Amir Vadaibc3103f2016-09-08 16:23:47 +030029#include <net/dst.h>
30#include <net/dst_metadata.h>
31
/* Every field the flower classifier can match on.  The struct is
 * aligned to a long so the masked key can be compared/copied as a run
 * of longs (see fl_set_masked_key()); field order therefore matters
 * and must match the flow dissector's expectations.
 */
struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
59
/* Byte span [start, end) of fl_flow_key that a mask actually covers,
 * rounded to long boundaries by fl_mask_update_range().
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};
64
/* One mask, shared by all filters that use identical mask bits.  Each
 * mask owns a hash table of its filters (keyed on the masked key) and
 * is itself linked on cls_fl_head->masks / hashed in cls_fl_head->ht.
 */
struct fl_flow_mask {
	struct fl_flow_key key;			/* the mask bits themselves */
	struct fl_flow_mask_range range;	/* non-zero span of @key */
	struct rhash_head ht_node;		/* membership in head->ht */
	struct rhashtable ht;			/* filters using this mask */
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;		/* cls_fl_filter::list */
	struct rcu_work rwork;			/* deferred free (fl_mask_free_work) */
	struct list_head list;			/* membership in head->masks */
};
76
/* Per-chain template constraining which mask bits filters on @chain
 * may set; enforced by fl_mask_fits_tmplt().
 */
struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};
83
/* Per-tcf_proto root object. */
struct cls_fl_head {
	struct rhashtable ht;		/* all masks, keyed by their key bytes */
	struct list_head masks;		/* same masks, walked by fl_classify() */
	struct rcu_work rwork;		/* deferred teardown (fl_destroy_sleepable) */
	struct idr handle_idr;		/* handle -> cls_fl_filter */
};
90
/* One installed filter.  Hashed into its mask's table by @mkey (the
 * user key pre-masked with mask->key) and reachable by @handle via
 * the head's idr.
 */
struct cls_fl_filter {
	struct fl_flow_mask *mask;	/* shared mask this filter uses */
	struct rhash_head ht_node;	/* membership in mask->ht */
	struct fl_flow_key mkey;	/* key & mask->key; lookup key */
	struct tcf_exts exts;		/* attached actions */
	struct tcf_result res;		/* classification result */
	struct fl_flow_key key;		/* key as configured by the user */
	struct list_head list;		/* membership in mask->filters */
	u32 handle;
	u32 flags;			/* TCA_CLS_FLAGS_* (skip_sw/skip_hw/in_hw) */
	u32 in_hw_count;		/* set from tc_setup_cb_call() result */
	struct rcu_work rwork;		/* deferred destroy (fl_destroy_filter_work) */
	struct net_device *hw_dev;
};
105
/* Parameters for cls_fl_head::ht - masks keyed by their full key bytes,
 * so filters with identical masks share one fl_flow_mask.
 */
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};
112
Jiri Pirko77b99002015-05-12 14:56:21 +0200113static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
114{
115 return mask->range.end - mask->range.start;
116}
117
/* Recompute mask->range as the smallest long-aligned byte window
 * [start, end) containing every non-zero byte of mask->key.  All
 * masked copies/compares (fl_set_masked_key(), fl_lookup()) are then
 * restricted to this window.
 */
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	/* first non-zero byte (stays 0 if the mask is all-zero) */
	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	/* last non-zero byte; the backward scan stops at 'first', so
	 * 'last' is correct even when only a single byte is set
	 */
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	/* round outward to long boundaries for the long-wise compares */
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
140
141static void *fl_key_get_start(struct fl_flow_key *key,
142 const struct fl_flow_mask *mask)
143{
144 return (u8 *) key + mask->range.start;
145}
146
147static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
148 struct fl_flow_mask *mask)
149{
150 const long *lkey = fl_key_get_start(key, mask);
151 const long *lmask = fl_key_get_start(&mask->key, mask);
152 long *lmkey = fl_key_get_start(mkey, mask);
153 int i;
154
155 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
156 *lmkey++ = *lkey++ & *lmask++;
157}
158
/* True iff every bit set in @mask is also allowed by the chain
 * template @tmplt; a NULL template permits everything.  Only the
 * mask's active range needs checking - bytes outside it are zero.
 */
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		/* any mask bit outside the template's mask is a violation */
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}
175
/* Zero only the bytes of @key covered by @mask's active range - the
 * rest is never read by masked compares.
 */
static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
181
/* Look up the filter matching the pre-masked key @mkey in @mask's
 * hash table; returns NULL when nothing matches.
 */
static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}
188
/* Classification fast path (runs under RCU BH).  For each mask, in
 * list order: dissect the skb with that mask's dissector, apply the
 * mask, and look the masked key up in the mask's filter table.  The
 * first software-visible match executes its actions; -1 on no match.
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_mask *mask;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		/* only bytes inside the mask range are ever compared,
		 * so only those need clearing
		 */
		fl_clear_masked_range(&skb_key, mask);

		skb_key.indev_ifindex = skb->skb_iif;
		/* skb_flow_dissect() does not set n_proto in case an unknown
		 * protocol, so do it rather here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey);
		/* skip-sw filters match in hardware only */
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
219
/* Allocate and publish the per-proto head.  Returns -ENOBUFS on
 * allocation failure, otherwise rhashtable_init()'s result.
 */
static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->masks);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}
234
/* Free a mask and the filter hash table it owns. */
static void fl_mask_free(struct fl_flow_mask *mask)
{
	rhashtable_destroy(&mask->ht);
	kfree(mask);
}
240
/* Deferred mask free, queued via tcf_queue_work() so it runs after an
 * RCU grace period.
 */
static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask);
}
248
/* Release a filter's use of @mask.  If other filters still reference
 * it (non-empty filter list) do nothing and return false; otherwise
 * unlink it from head->ht and head->masks, free it - deferred past an
 * RCU grace period when @async - and return true.
 */
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
			bool async)
{
	if (!list_empty(&mask->filters))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
	list_del_rcu(&mask->list);
	if (async)
		tcf_queue_work(&mask->rwork, fl_mask_free_work);
	else
		fl_mask_free(mask);

	return true;
}
264
/* Final filter teardown: destroy actions, drop their netns reference,
 * free the filter memory.
 */
static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}
271
/* Deferred filter destruction; takes RTNL because the teardown path
 * it calls runs under rtnl_lock elsewhere in this file.
 */
static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	rtnl_lock();
	__fl_destroy_filter(f);
	rtnl_unlock();
}
281
/* Ask the block's offload drivers to remove filter @f and drop the
 * block's offload count.
 */
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;	/* same cookie as REPLACE used */

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
	tcf_block_offload_dec(block, &f->flags);
}
296
/* Offer filter @f to the block's offload drivers.  Returns a driver
 * error (after rolling back any partial installs), -EINVAL when
 * skip_sw was requested but no driver took the filter, or 0.
 */
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f,
				struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	int err;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.dissector = &f->mask->dissector;
	cls_flower.mask = &f->mask->key;
	cls_flower.key = &f->mkey;
	cls_flower.exts = &f->exts;
	cls_flower.classid = f->res.classid;

	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			       &cls_flower, skip_sw);
	if (err < 0) {
		/* undo partial installs before reporting the failure */
		fl_hw_destroy_filter(tp, f, NULL);
		return err;
	} else if (err > 0) {
		/* positive err counts callbacks that accepted the filter */
		f->in_hw_count = err;
		tcf_block_offload_inc(block, &f->flags);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
330
/* Ask offload drivers to refresh the counters attached to @f's
 * actions (TC_CLSFLOWER_STATS callback).
 */
static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.exts = &f->exts;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
			 &cls_flower, false);
}
345
/* Unlink and destroy filter @f (caller holds RTNL).  Returns true when
 * @f was the last user of its mask and the mask was freed too.
 */
static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
			struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	bool async = tcf_exts_get_net(&f->exts);	/* netns still alive? */
	bool last;

	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	last = fl_mask_put(head, f->mask, async);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, extack);
	tcf_unbind_filter(tp, &f->res);
	/* defer the free past an RCU grace period while the netns ref
	 * is held; free synchronously otherwise
	 */
	if (async)
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);

	return last;
}
366
/* Final head teardown, run from the workqueue after fl_destroy();
 * releases the module reference fl_destroy() took.
 */
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}
377
/* Tear down the whole classifier instance: every filter under every
 * mask, then the head itself via deferred work.
 */
static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			/* true return means the mask itself is gone;
			 * stop walking its (now freed) filter list
			 */
			if (__fl_delete(tp, f, extack))
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	/* keep the module loaded until fl_destroy_sleepable() runs */
	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}
395
/* Look a filter up by handle; NULL when no such handle exists. */
static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);

	return idr_find(&head->handle_idr, handle);
}
402
/* Netlink attribute policy for all TCA_FLOWER_* filter attributes. */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
};
490
/* Policy for the nested TCA_FLOWER_KEY_ENC_OPTS attribute. */
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
};
495
/* Policy for one nested geneve option (class/type/data triple). */
static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};
503
504static void fl_set_key_val(struct nlattr **tb,
505 void *val, int val_type,
506 void *mask, int mask_type, int len)
507{
508 if (!tb[val_type])
509 return;
510 memcpy(val, nla_data(tb[val_type]), len);
511 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
512 memset(mask, 0xff, len);
513 else
514 memcpy(mask, nla_data(tb[mask_type]), len);
515}
516
/* Parse MPLS match attributes.  Each present attribute implies a
 * full-field mask; BOS/TC/LABEL values wider than their field return
 * -EINVAL (TTL occupies a full u8, so no range check is needed).
 */
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}
551
/* Parse one VLAN tag match (outer or inner, selected by the caller's
 * attribute ids).  @ethertype becomes the TPID and is always matched
 * exactly.
 */
static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}
574
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200575static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
576 u32 *dissector_key, u32 *dissector_mask,
577 u32 flower_flag_bit, u32 dissector_flag_bit)
578{
579 if (flower_mask & flower_flag_bit) {
580 *dissector_mask |= dissector_flag_bit;
581 if (flower_key & flower_flag_bit)
582 *dissector_key |= dissector_flag_bit;
583 }
584}
585
/* Translate TCA_FLOWER_KEY_FLAGS{,_MASK} into dissector flag words;
 * the mask attribute is mandatory (-EINVAL without it).
 */
static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	/* the attribute payload is big-endian on the wire */
	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}
609
Or Gerlitz0e2c17b2018-07-17 19:27:18 +0300610static void fl_set_key_ip(struct nlattr **tb, bool encap,
Or Gerlitz4d80cc02017-06-01 21:37:38 +0300611 struct flow_dissector_key_ip *key,
612 struct flow_dissector_key_ip *mask)
613{
Or Gerlitz0e2c17b2018-07-17 19:27:18 +0300614 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
615 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
616 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
617 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
Or Gerlitz4d80cc02017-06-01 21:37:38 +0300618
Or Gerlitz0e2c17b2018-07-17 19:27:18 +0300619 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
620 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
Or Gerlitz4d80cc02017-06-01 21:37:38 +0300621}
622
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +0200623static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
624 int depth, int option_len,
625 struct netlink_ext_ack *extack)
626{
627 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
628 struct nlattr *class = NULL, *type = NULL, *data = NULL;
629 struct geneve_opt *opt;
630 int err, data_len = 0;
631
632 if (option_len > sizeof(struct geneve_opt))
633 data_len = option_len - sizeof(struct geneve_opt);
634
635 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
636 memset(opt, 0xff, option_len);
637 opt->length = data_len / 4;
638 opt->r1 = 0;
639 opt->r2 = 0;
640 opt->r3 = 0;
641
642 /* If no mask has been prodived we assume an exact match. */
643 if (!depth)
644 return sizeof(struct geneve_opt) + data_len;
645
646 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
647 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
648 return -EINVAL;
649 }
650
651 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
652 nla, geneve_opt_policy, extack);
653 if (err < 0)
654 return err;
655
656 /* We are not allowed to omit any of CLASS, TYPE or DATA
657 * fields from the key.
658 */
659 if (!option_len &&
660 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
661 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
662 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
663 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
664 return -EINVAL;
665 }
666
667 /* Omitting any of CLASS, TYPE or DATA fields is allowed
668 * for the mask.
669 */
670 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
671 int new_len = key->enc_opts.len;
672
673 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
674 data_len = nla_len(data);
675 if (data_len < 4) {
676 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
677 return -ERANGE;
678 }
679 if (data_len % 4) {
680 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
681 return -ERANGE;
682 }
683
684 new_len += sizeof(struct geneve_opt) + data_len;
685 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
686 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
687 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
688 return -ERANGE;
689 }
690 opt->length = data_len / 4;
691 memcpy(opt->opt_data, nla_data(data), data_len);
692 }
693
694 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
695 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
696 opt->opt_class = nla_get_be16(class);
697 }
698
699 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
700 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
701 opt->type = nla_get_u8(type);
702 }
703
704 return sizeof(struct geneve_opt) + data_len;
705}
706
707static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
708 struct fl_flow_key *mask,
709 struct netlink_ext_ack *extack)
710{
711 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
Jakub Kicinski63c82992018-11-09 21:06:26 -0800712 int err, option_len, key_depth, msk_depth = 0;
713
714 err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
715 TCA_FLOWER_KEY_ENC_OPTS_MAX,
716 enc_opts_policy, extack);
717 if (err)
718 return err;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +0200719
720 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
721
722 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
Jakub Kicinski63c82992018-11-09 21:06:26 -0800723 err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
724 TCA_FLOWER_KEY_ENC_OPTS_MAX,
725 enc_opts_policy, extack);
726 if (err)
727 return err;
728
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +0200729 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
730 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
731 }
732
733 nla_for_each_attr(nla_opt_key, nla_enc_key,
734 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
735 switch (nla_type(nla_opt_key)) {
736 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
737 option_len = 0;
738 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
739 option_len = fl_set_geneve_opt(nla_opt_key, key,
740 key_depth, option_len,
741 extack);
742 if (option_len < 0)
743 return option_len;
744
745 key->enc_opts.len += option_len;
746 /* At the same time we need to parse through the mask
747 * in order to verify exact and mask attribute lengths.
748 */
749 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
750 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
751 msk_depth, option_len,
752 extack);
753 if (option_len < 0)
754 return option_len;
755
756 mask->enc_opts.len += option_len;
757 if (key->enc_opts.len != mask->enc_opts.len) {
758 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
759 return -EINVAL;
760 }
761
762 if (msk_depth)
763 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
764 break;
765 default:
766 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
767 return -EINVAL;
768 }
769 }
770
771 return 0;
772}
773
/* Translate all TCA_FLOWER_KEY_* netlink attributes in @tb into the
 * match key @key and its per-byte mask @mask.  Attributes that are
 * absent leave their key/mask bytes zero (wildcard).  Returns 0 on
 * success or a negative errno.
 */
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	/* Optional match on the ingress device; tcf_change_indev()
	 * returns the resolved ifindex (or a negative errno).
	 */
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	/* Resolve the L3 protocol, peeling up to two VLAN tags
	 * (outer vlan + customer cvlan) to find the real n_proto.
	 */
	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					/* Double-tagged (QinQ): inner tag
					 * becomes the cvlan match.
					 */
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	/* L3: ip_proto and TOS/TTL only make sense for IPv4/IPv6. */
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	/* L4 and protocol-specific fields, keyed off ip_proto/n_proto. */
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		/* fl_set_key_mpls() validates value ranges and can fail. */
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	/* Tunnel (encapsulation) metadata matches. */
	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}
981
Paul Blakey05cd2712018-04-30 14:28:30 +0300982static void fl_mask_copy(struct fl_flow_mask *dst,
983 struct fl_flow_mask *src)
Jiri Pirko77b99002015-05-12 14:56:21 +0200984{
Paul Blakey05cd2712018-04-30 14:28:30 +0300985 const void *psrc = fl_key_get_start(&src->key, src);
986 void *pdst = fl_key_get_start(&dst->key, src);
Jiri Pirko77b99002015-05-12 14:56:21 +0200987
Paul Blakey05cd2712018-04-30 14:28:30 +0300988 memcpy(pdst, psrc, fl_mask_range(src));
989 dst->range = src->range;
Jiri Pirko77b99002015-05-12 14:56:21 +0200990}
991
/* Template parameters for the per-mask filter hash tables.  Each mask
 * instance copies this template and then narrows key_offset/key_len to
 * its own masked byte range in fl_init_mask_hashtable().
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
997
Paul Blakey05cd2712018-04-30 14:28:30 +0300998static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +0200999{
Paul Blakey05cd2712018-04-30 14:28:30 +03001000 mask->filter_ht_params = fl_ht_params;
1001 mask->filter_ht_params.key_len = fl_mask_range(mask);
1002 mask->filter_ht_params.key_offset += mask->range.start;
Jiri Pirko77b99002015-05-12 14:56:21 +02001003
Paul Blakey05cd2712018-04-30 14:28:30 +03001004 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
Jiri Pirko77b99002015-05-12 14:56:21 +02001005}
1006
/* Byte offset / size of a member inside struct fl_flow_key. */
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)

/* Non-NULL (true) iff any byte of @member in @mask is non-zero, i.e.
 * the user requested a match on (part of) that key member.
 */
#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
		   0, FL_KEY_MEMBER_SIZE(member))		\

/* Append flow dissector key @id for @member to @keys, advancing @cnt. */
#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

/* Append @id/@member to @keys only when @mask actually matches on it. */
#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);
1026
/* Build the flow dissector for @mask: CONTROL and BASIC are always
 * dissected; every other key is included only when the mask matches
 * on it, which keeps the dissection work minimal per mask.
 */
static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	/* ENC_CONTROL is needed whenever either tunnel address family
	 * is matched on.
	 */
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);

	skb_flow_dissector_init(dissector, keys, cnt);
}
1076
1077static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1078 struct fl_flow_mask *mask)
1079{
1080 struct fl_flow_mask *newmask;
1081 int err;
1082
1083 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1084 if (!newmask)
1085 return ERR_PTR(-ENOMEM);
1086
1087 fl_mask_copy(newmask, mask);
1088
1089 err = fl_init_mask_hashtable(newmask);
1090 if (err)
1091 goto errout_free;
1092
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001093 fl_init_dissector(&newmask->dissector, &newmask->key);
Paul Blakey05cd2712018-04-30 14:28:30 +03001094
1095 INIT_LIST_HEAD_RCU(&newmask->filters);
1096
1097 err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
1098 mask_ht_params);
1099 if (err)
1100 goto errout_destroy;
1101
1102 list_add_tail_rcu(&newmask->list, &head->masks);
1103
1104 return newmask;
1105
1106errout_destroy:
1107 rhashtable_destroy(&newmask->ht);
1108errout_free:
1109 kfree(newmask);
1110
1111 return ERR_PTR(err);
Jiri Pirko77b99002015-05-12 14:56:21 +02001112}
1113
1114static int fl_check_assign_mask(struct cls_fl_head *head,
Paul Blakey05cd2712018-04-30 14:28:30 +03001115 struct cls_fl_filter *fnew,
1116 struct cls_fl_filter *fold,
Jiri Pirko77b99002015-05-12 14:56:21 +02001117 struct fl_flow_mask *mask)
1118{
Paul Blakey05cd2712018-04-30 14:28:30 +03001119 struct fl_flow_mask *newmask;
Jiri Pirko77b99002015-05-12 14:56:21 +02001120
Paul Blakey05cd2712018-04-30 14:28:30 +03001121 fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
1122 if (!fnew->mask) {
1123 if (fold)
Jiri Pirko77b99002015-05-12 14:56:21 +02001124 return -EINVAL;
Paul Blakey05cd2712018-04-30 14:28:30 +03001125
1126 newmask = fl_create_new_mask(head, mask);
1127 if (IS_ERR(newmask))
1128 return PTR_ERR(newmask);
1129
1130 fnew->mask = newmask;
Paul Blakeyf6521c52018-06-03 10:06:14 +03001131 } else if (fold && fold->mask != fnew->mask) {
Paul Blakey05cd2712018-04-30 14:28:30 +03001132 return -EINVAL;
Jiri Pirko77b99002015-05-12 14:56:21 +02001133 }
1134
Jiri Pirko77b99002015-05-12 14:56:21 +02001135 return 0;
1136}
1137
/* Validate and apply all filter parameters for @f: actions/exts,
 * classid binding, the match key+mask, and the chain template check.
 * Returns 0 on success or a negative errno.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	/* Compute the used byte span of the mask, then pre-mask the key
	 * so hash lookups compare only relevant bytes.
	 */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}
1170
Jiri Pirko77b99002015-05-12 14:56:21 +02001171static int fl_change(struct net *net, struct sk_buff *in_skb,
1172 struct tcf_proto *tp, unsigned long base,
1173 u32 handle, struct nlattr **tca,
Alexander Aring7306db32018-01-18 11:20:51 -05001174 void **arg, bool ovr, struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001175{
1176 struct cls_fl_head *head = rtnl_dereference(tp->root);
WANG Cong8113c092017-08-04 21:31:43 -07001177 struct cls_fl_filter *fold = *arg;
Jiri Pirko77b99002015-05-12 14:56:21 +02001178 struct cls_fl_filter *fnew;
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001179 struct nlattr **tb;
Jiri Pirko77b99002015-05-12 14:56:21 +02001180 struct fl_flow_mask mask = {};
1181 int err;
1182
1183 if (!tca[TCA_OPTIONS])
1184 return -EINVAL;
1185
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001186 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1187 if (!tb)
1188 return -ENOBUFS;
1189
Johannes Bergfceb6432017-04-12 14:34:07 +02001190 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1191 fl_policy, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001192 if (err < 0)
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001193 goto errout_tb;
Jiri Pirko77b99002015-05-12 14:56:21 +02001194
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001195 if (fold && handle && fold->handle != handle) {
1196 err = -EINVAL;
1197 goto errout_tb;
1198 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001199
1200 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001201 if (!fnew) {
1202 err = -ENOBUFS;
1203 goto errout_tb;
1204 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001205
WANG Congb9a24bb2016-08-19 12:36:54 -07001206 err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
1207 if (err < 0)
1208 goto errout;
Jiri Pirko77b99002015-05-12 14:56:21 +02001209
1210 if (!handle) {
Matthew Wilcox85bd0432017-11-28 10:53:03 -05001211 handle = 1;
1212 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1213 INT_MAX, GFP_KERNEL);
1214 } else if (!fold) {
1215 /* user specifies a handle and it doesn't exist */
1216 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1217 handle, GFP_KERNEL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001218 }
Matthew Wilcox85bd0432017-11-28 10:53:03 -05001219 if (err)
1220 goto errout;
1221 fnew->handle = handle;
Jiri Pirko77b99002015-05-12 14:56:21 +02001222
Amir Vadaie69985c2016-06-05 17:11:18 +03001223 if (tb[TCA_FLOWER_FLAGS]) {
1224 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1225
1226 if (!tc_flags_valid(fnew->flags)) {
1227 err = -EINVAL;
Cong Wangfe2502e2017-09-20 09:18:45 -07001228 goto errout_idr;
Amir Vadaie69985c2016-06-05 17:11:18 +03001229 }
1230 }
Amir Vadai5b33f482016-03-08 12:42:29 +02001231
Alexander Aring50a56192018-01-18 11:20:52 -05001232 err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001233 tp->chain->tmplt_priv, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001234 if (err)
Cong Wangfe2502e2017-09-20 09:18:45 -07001235 goto errout_idr;
Jiri Pirko77b99002015-05-12 14:56:21 +02001236
Paul Blakey05cd2712018-04-30 14:28:30 +03001237 err = fl_check_assign_mask(head, fnew, fold, &mask);
Jiri Pirko77b99002015-05-12 14:56:21 +02001238 if (err)
Cong Wangfe2502e2017-09-20 09:18:45 -07001239 goto errout_idr;
Jiri Pirko77b99002015-05-12 14:56:21 +02001240
Or Gerlitz35cc3cef2018-12-09 18:10:24 +02001241 if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
1242 err = -EEXIST;
1243 goto errout_mask;
Amir Vadaie69985c2016-06-05 17:11:18 +03001244 }
Amir Vadai5b33f482016-03-08 12:42:29 +02001245
Or Gerlitz35cc3cef2018-12-09 18:10:24 +02001246 err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
1247 fnew->mask->filter_ht_params);
1248 if (err)
1249 goto errout_mask;
1250
Hadar Hen Zion79685212016-12-01 14:06:34 +02001251 if (!tc_skip_hw(fnew->flags)) {
Paul Blakey05cd2712018-04-30 14:28:30 +03001252 err = fl_hw_replace_filter(tp, fnew, extack);
Hadar Hen Zion79685212016-12-01 14:06:34 +02001253 if (err)
Paul Blakey05cd2712018-04-30 14:28:30 +03001254 goto errout_mask;
Hadar Hen Zion79685212016-12-01 14:06:34 +02001255 }
Amir Vadai5b33f482016-03-08 12:42:29 +02001256
Or Gerlitz55593962017-02-16 10:31:13 +02001257 if (!tc_in_hw(fnew->flags))
1258 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1259
Amir Vadai5b33f482016-03-08 12:42:29 +02001260 if (fold) {
Jiri Pirko725cbb622016-11-28 15:40:13 +01001261 if (!tc_skip_sw(fold->flags))
Paul Blakey05cd2712018-04-30 14:28:30 +03001262 rhashtable_remove_fast(&fold->mask->ht,
1263 &fold->ht_node,
1264 fold->mask->filter_ht_params);
Hadar Hen Zion79685212016-12-01 14:06:34 +02001265 if (!tc_skip_hw(fold->flags))
Jakub Kicinski1b0f8032018-01-24 12:54:21 -08001266 fl_hw_destroy_filter(tp, fold, NULL);
Amir Vadai5b33f482016-03-08 12:42:29 +02001267 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001268
WANG Cong8113c092017-08-04 21:31:43 -07001269 *arg = fnew;
Jiri Pirko77b99002015-05-12 14:56:21 +02001270
1271 if (fold) {
Matthew Wilcox234a4622017-11-28 09:56:36 -05001272 idr_replace(&head->handle_idr, fnew, fnew->handle);
Daniel Borkmannff3532f2015-07-17 22:38:44 +02001273 list_replace_rcu(&fold->list, &fnew->list);
Jiri Pirko77b99002015-05-12 14:56:21 +02001274 tcf_unbind_filter(tp, &fold->res);
Cong Wang0dadc112017-11-06 13:47:24 -08001275 tcf_exts_get_net(&fold->exts);
Cong Wangaaa908f2018-05-23 15:26:53 -07001276 tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
Jiri Pirko77b99002015-05-12 14:56:21 +02001277 } else {
Paul Blakey05cd2712018-04-30 14:28:30 +03001278 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
Jiri Pirko77b99002015-05-12 14:56:21 +02001279 }
1280
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001281 kfree(tb);
Jiri Pirko77b99002015-05-12 14:56:21 +02001282 return 0;
1283
Paul Blakey05cd2712018-04-30 14:28:30 +03001284errout_mask:
1285 fl_mask_put(head, fnew->mask, false);
1286
Cong Wangfe2502e2017-09-20 09:18:45 -07001287errout_idr:
Paul Blakey8258d2d2018-05-30 11:29:15 +03001288 if (!fold)
Matthew Wilcox9c160942017-11-28 09:48:43 -05001289 idr_remove(&head->handle_idr, fnew->handle);
Jiri Pirko77b99002015-05-12 14:56:21 +02001290errout:
WANG Congb9a24bb2016-08-19 12:36:54 -07001291 tcf_exts_destroy(&fnew->exts);
Jiri Pirko77b99002015-05-12 14:56:21 +02001292 kfree(fnew);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001293errout_tb:
1294 kfree(tb);
Jiri Pirko77b99002015-05-12 14:56:21 +02001295 return err;
1296}
1297
/* Delete filter @arg from this classifier.  Sets *last true when no
 * masks (and therefore no filters) remain, so the caller may destroy
 * the tcf_proto.
 */
static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = arg;

	/* Unlink from the per-mask lookup table before tearing down. */
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	__fl_delete(tp, f, extack);
	*last = list_empty(&head->masks);
	return 0;
}
1310
1311static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
1312{
1313 struct cls_fl_head *head = rtnl_dereference(tp->root);
1314 struct cls_fl_filter *f;
1315
Vlad Buslov01683a12018-07-09 13:29:11 +03001316 arg->count = arg->skip;
1317
1318 while ((f = idr_get_next_ul(&head->handle_idr,
1319 &arg->cookie)) != NULL) {
1320 if (arg->fn(tp, f, arg) < 0) {
1321 arg->stop = 1;
1322 break;
Paul Blakey05cd2712018-04-30 14:28:30 +03001323 }
Vlad Buslov01683a12018-07-09 13:29:11 +03001324 arg->cookie = f->handle + 1;
1325 arg->count++;
Jiri Pirko77b99002015-05-12 14:56:21 +02001326 }
1327}
1328
/* Replay every offloadable filter to a single hardware callback @cb
 * (e.g. when a block binding changes).  @add selects REPLACE vs
 * DESTROY.  Failures are fatal only when adding a skip_sw filter.
 */
static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;
	int err;

	list_for_each_entry(mask, &head->masks, list) {
		list_for_each_entry(f, &mask->filters, list) {
			/* skip_hw filters are never offloaded. */
			if (tc_skip_hw(f->flags))
				continue;

			tc_cls_common_offload_init(&cls_flower.common, tp,
						   f->flags, extack);
			cls_flower.command = add ?
				TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
			cls_flower.cookie = (unsigned long)f;
			cls_flower.dissector = &mask->dissector;
			cls_flower.mask = &mask->key;
			cls_flower.key = &f->mkey;
			cls_flower.exts = &f->exts;
			cls_flower.classid = f->res.classid;

			err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
			if (err) {
				/* A rejected skip_sw filter has nowhere
				 * to run; otherwise keep going.
				 */
				if (add && tc_skip_sw(f->flags))
					return err;
				continue;
			}

			tc_cls_offload_cnt_update(block, &f->in_hw_count,
						  &f->flags, add);
		}
	}

	return 0;
}
1369
Jiri Pirko34738452018-07-23 09:23:11 +02001370static void fl_hw_create_tmplt(struct tcf_chain *chain,
1371 struct fl_flow_tmplt *tmplt)
1372{
1373 struct tc_cls_flower_offload cls_flower = {};
1374 struct tcf_block *block = chain->block;
1375 struct tcf_exts dummy_exts = { 0, };
1376
1377 cls_flower.common.chain_index = chain->index;
1378 cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
1379 cls_flower.cookie = (unsigned long) tmplt;
1380 cls_flower.dissector = &tmplt->dissector;
1381 cls_flower.mask = &tmplt->mask;
1382 cls_flower.key = &tmplt->dummy_key;
1383 cls_flower.exts = &dummy_exts;
1384
1385 /* We don't care if driver (any of them) fails to handle this
1386 * call. It serves just as a hint for it.
1387 */
1388 tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
1389 &cls_flower, false);
1390}
1391
1392static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1393 struct fl_flow_tmplt *tmplt)
1394{
1395 struct tc_cls_flower_offload cls_flower = {};
1396 struct tcf_block *block = chain->block;
1397
1398 cls_flower.common.chain_index = chain->index;
1399 cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
1400 cls_flower.cookie = (unsigned long) tmplt;
1401
1402 tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
1403 &cls_flower, false);
1404}
1405
/* Parse TCA_OPTIONS into a new flower template bound to @chain.
 * Returns the template pointer on success or an ERR_PTR on failure.
 * The attribute table @tb is temporary and freed on every path; the
 * template itself is owned by the caller (freed via fl_tmplt_destroy).
 */
static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	/* The per-attribute pointer table is too large for the stack. */
	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
			       fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;
	/* tb is no longer needed once the key/mask are extracted. */
	kfree(tb);

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	/* Hint the template to drivers; failures are ignored. */
	fl_hw_create_tmplt(chain, tmplt);

	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}
1448
/* Tear down a template created by fl_tmplt_create: withdraw the
 * hardware hint first, then release the memory.
 */
static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}
1456
Jiri Pirko77b99002015-05-12 14:56:21 +02001457static int fl_dump_key_val(struct sk_buff *skb,
1458 void *val, int val_type,
1459 void *mask, int mask_type, int len)
1460{
1461 int err;
1462
1463 if (!memchr_inv(mask, 0, len))
1464 return 0;
1465 err = nla_put(skb, val_type, len, val);
1466 if (err)
1467 return err;
1468 if (mask_type != TCA_FLOWER_UNSPEC) {
1469 err = nla_put(skb, mask_type, len, mask);
1470 if (err)
1471 return err;
1472 }
1473 return 0;
1474}
1475
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001476static int fl_dump_key_mpls(struct sk_buff *skb,
1477 struct flow_dissector_key_mpls *mpls_key,
1478 struct flow_dissector_key_mpls *mpls_mask)
1479{
1480 int err;
1481
1482 if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1483 return 0;
1484 if (mpls_mask->mpls_ttl) {
1485 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1486 mpls_key->mpls_ttl);
1487 if (err)
1488 return err;
1489 }
1490 if (mpls_mask->mpls_tc) {
1491 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1492 mpls_key->mpls_tc);
1493 if (err)
1494 return err;
1495 }
1496 if (mpls_mask->mpls_label) {
1497 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1498 mpls_key->mpls_label);
1499 if (err)
1500 return err;
1501 }
1502 if (mpls_mask->mpls_bos) {
1503 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1504 mpls_key->mpls_bos);
1505 if (err)
1506 return err;
1507 }
1508 return 0;
1509}
1510
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001511static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001512 struct flow_dissector_key_ip *key,
1513 struct flow_dissector_key_ip *mask)
1514{
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001515 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1516 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1517 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1518 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1519
1520 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
1521 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001522 return -1;
1523
1524 return 0;
1525}
1526
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001527static int fl_dump_key_vlan(struct sk_buff *skb,
Jianbo Liud64efd02018-07-06 05:38:16 +00001528 int vlan_id_key, int vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001529 struct flow_dissector_key_vlan *vlan_key,
1530 struct flow_dissector_key_vlan *vlan_mask)
1531{
1532 int err;
1533
1534 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
1535 return 0;
1536 if (vlan_mask->vlan_id) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001537 err = nla_put_u16(skb, vlan_id_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001538 vlan_key->vlan_id);
1539 if (err)
1540 return err;
1541 }
1542 if (vlan_mask->vlan_priority) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001543 err = nla_put_u8(skb, vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001544 vlan_key->vlan_priority);
1545 if (err)
1546 return err;
1547 }
1548 return 0;
1549}
1550
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001551static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
1552 u32 *flower_key, u32 *flower_mask,
1553 u32 flower_flag_bit, u32 dissector_flag_bit)
1554{
1555 if (dissector_mask & dissector_flag_bit) {
1556 *flower_mask |= flower_flag_bit;
1557 if (dissector_key & dissector_flag_bit)
1558 *flower_key |= flower_flag_bit;
1559 }
1560}
1561
1562static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
1563{
1564 u32 key, mask;
1565 __be32 _key, _mask;
1566 int err;
1567
1568 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
1569 return 0;
1570
1571 key = 0;
1572 mask = 0;
1573
1574 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
1575 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
Pieter Jansen van Vuuren459d1532018-03-06 18:11:14 +01001576 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
1577 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1578 FLOW_DIS_FIRST_FRAG);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001579
1580 _key = cpu_to_be32(key);
1581 _mask = cpu_to_be32(mask);
1582
1583 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
1584 if (err)
1585 return err;
1586
1587 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
1588}
1589
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001590static int fl_dump_key_geneve_opt(struct sk_buff *skb,
1591 struct flow_dissector_key_enc_opts *enc_opts)
1592{
1593 struct geneve_opt *opt;
1594 struct nlattr *nest;
1595 int opt_off = 0;
1596
1597 nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
1598 if (!nest)
1599 goto nla_put_failure;
1600
1601 while (enc_opts->len > opt_off) {
1602 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
1603
1604 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
1605 opt->opt_class))
1606 goto nla_put_failure;
1607 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
1608 opt->type))
1609 goto nla_put_failure;
1610 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
1611 opt->length * 4, opt->opt_data))
1612 goto nla_put_failure;
1613
1614 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
1615 }
1616 nla_nest_end(skb, nest);
1617 return 0;
1618
1619nla_put_failure:
1620 nla_nest_cancel(skb, nest);
1621 return -EMSGSIZE;
1622}
1623
1624static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
1625 struct flow_dissector_key_enc_opts *enc_opts)
1626{
1627 struct nlattr *nest;
1628 int err;
1629
1630 if (!enc_opts->len)
1631 return 0;
1632
1633 nest = nla_nest_start(skb, enc_opt_type);
1634 if (!nest)
1635 goto nla_put_failure;
1636
1637 switch (enc_opts->dst_opt_type) {
1638 case TUNNEL_GENEVE_OPT:
1639 err = fl_dump_key_geneve_opt(skb, enc_opts);
1640 if (err)
1641 goto nla_put_failure;
1642 break;
1643 default:
1644 goto nla_put_failure;
1645 }
1646 nla_nest_end(skb, nest);
1647 return 0;
1648
1649nla_put_failure:
1650 nla_nest_cancel(skb, nest);
1651 return -EMSGSIZE;
1652}
1653
1654static int fl_dump_key_enc_opt(struct sk_buff *skb,
1655 struct flow_dissector_key_enc_opts *key_opts,
1656 struct flow_dissector_key_enc_opts *msk_opts)
1657{
1658 int err;
1659
1660 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
1661 if (err)
1662 return err;
1663
1664 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
1665}
1666
/* Serialize a flower key/mask pair into netlink attributes. Shared by
 * the per-filter dump (fl_dump) and the template dump (fl_tmplt_dump).
 * Fields whose mask is zero are skipped by the fl_dump_key_* helpers.
 * Returns 0 on success, -EMSGSIZE when the skb runs out of room.
 */
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->indev_ifindex) {
		struct net_device *dev;

		/* The ifindex is translated back to a name; if the device
		 * is gone the attribute is silently omitted.
		 */
		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	/* Ethernet addresses and ethertype. */
	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	/* Outer VLAN tag. */
	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	/* Inner (customer) VLAN tag; when QinQ is matched the outer
	 * ethertype attribute carries the inner tag's TPID.
	 */
	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	/* The post-VLAN protocol is reported under the attribute matching
	 * how deeply the packet is tagged (CVLAN_ETH_TYPE for QinQ,
	 * VLAN_ETH_TYPE for a single tag).
	 */
	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	/* L3 protocol and TOS/TTL, only meaningful for IPv4/IPv6. */
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)) ||
	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	/* L3 addresses, keyed off the dissector address type. */
	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	/* L4 / control protocols: exactly one branch applies, selected by
	 * ip_proto (TCP/UDP/SCTP/ICMP) or n_proto (ARP/RARP).
	 */
	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

	/* Tunnel (encapsulation) addresses. */
	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			    sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			    TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			    sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			    sizeof(key->enc_ipv6.src)) ||
		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
				 TCA_FLOWER_KEY_ENC_IPV6_DST,
				 &mask->enc_ipv6.dst,
				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			    sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	/* Remaining tunnel metadata: VNI/key id, UDP ports, outer IP
	 * TOS/TTL, and tunnel options.
	 */
	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
1859
/* Netlink dump of a single flower filter: key/mask, flags, offload
 * count, and its extended actions, all nested under TCA_OPTIONS.
 * Returns the (new) skb length on success, -1 on overflow.
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &f->mask->key;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	/* Pull fresh counters from hardware before the stats dump, unless
	 * the filter never went to hardware.
	 */
	if (!tc_skip_hw(f->flags))
		fl_hw_update_stats(tp, f);

	/* Only emit the flags attribute when any flag is actually set. */
	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	/* Action statistics live outside the options nest. */
	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
1909
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001910static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
1911{
1912 struct fl_flow_tmplt *tmplt = tmplt_priv;
1913 struct fl_flow_key *key, *mask;
1914 struct nlattr *nest;
1915
1916 nest = nla_nest_start(skb, TCA_OPTIONS);
1917 if (!nest)
1918 goto nla_put_failure;
1919
1920 key = &tmplt->dummy_key;
1921 mask = &tmplt->mask;
1922
1923 if (fl_dump_key(skb, net, key, mask))
1924 goto nla_put_failure;
1925
1926 nla_nest_end(skb, nest);
1927
1928 return skb->len;
1929
1930nla_put_failure:
1931 nla_nest_cancel(skb, nest);
1932 return -EMSGSIZE;
1933}
1934
Cong Wang07d79fc2017-08-30 14:30:36 -07001935static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
1936{
1937 struct cls_fl_filter *f = fh;
1938
1939 if (f && f->res.classid == classid)
1940 f->res.class = cl;
1941}
1942
/* Classifier operations table registered with the TC core under the
 * "flower" kind.
 */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
};
1960
/* Module entry point: register the flower classifier with the TC core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

/* Module exit point: unregister the classifier. */
static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");