blob: 9cd8122a5c38e692f1e67ca6887c2571c06b7aa6 [file] [log] [blame]
Jiri Pirko77b99002015-05-12 14:56:21 +02001/*
2 * net/sched/cls_flower.c Flower classifier
3 *
4 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/rhashtable.h>
Daniel Borkmannd9363772016-11-27 01:18:01 +010016#include <linux/workqueue.h>
Vlad Buslov06177552019-03-21 15:17:35 +020017#include <linux/refcount.h>
Jiri Pirko77b99002015-05-12 14:56:21 +020018
19#include <linux/if_ether.h>
20#include <linux/in6.h>
21#include <linux/ip.h>
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -040022#include <linux/mpls.h>
Jiri Pirko77b99002015-05-12 14:56:21 +020023
24#include <net/sch_generic.h>
25#include <net/pkt_cls.h>
26#include <net/ip.h>
27#include <net/flow_dissector.h>
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +020028#include <net/geneve.h>
Jiri Pirko77b99002015-05-12 14:56:21 +020029
Amir Vadaibc3103f2016-09-08 16:23:47 +030030#include <net/dst.h>
31#include <net/dst_metadata.h>
32
Jiri Pirko77b99002015-05-12 14:56:21 +020033struct fl_flow_key {
34 int indev_ifindex;
Tom Herbert42aecaa2015-06-04 09:16:39 -070035 struct flow_dissector_key_control control;
Amir Vadaibc3103f2016-09-08 16:23:47 +030036 struct flow_dissector_key_control enc_control;
Jiri Pirko77b99002015-05-12 14:56:21 +020037 struct flow_dissector_key_basic basic;
38 struct flow_dissector_key_eth_addrs eth;
Hadar Hen Zion9399ae92016-08-17 13:36:13 +030039 struct flow_dissector_key_vlan vlan;
Jianbo Liud64efd02018-07-06 05:38:16 +000040 struct flow_dissector_key_vlan cvlan;
Jiri Pirko77b99002015-05-12 14:56:21 +020041 union {
Tom Herbertc3f83242015-06-04 09:16:40 -070042 struct flow_dissector_key_ipv4_addrs ipv4;
Jiri Pirko77b99002015-05-12 14:56:21 +020043 struct flow_dissector_key_ipv6_addrs ipv6;
44 };
45 struct flow_dissector_key_ports tp;
Simon Horman7b684882016-12-07 13:48:28 +010046 struct flow_dissector_key_icmp icmp;
Simon Horman99d31322017-01-11 14:05:43 +010047 struct flow_dissector_key_arp arp;
Amir Vadaibc3103f2016-09-08 16:23:47 +030048 struct flow_dissector_key_keyid enc_key_id;
49 union {
50 struct flow_dissector_key_ipv4_addrs enc_ipv4;
51 struct flow_dissector_key_ipv6_addrs enc_ipv6;
52 };
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +020053 struct flow_dissector_key_ports enc_tp;
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -040054 struct flow_dissector_key_mpls mpls;
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +020055 struct flow_dissector_key_tcp tcp;
Or Gerlitz4d80cc02017-06-01 21:37:38 +030056 struct flow_dissector_key_ip ip;
Or Gerlitz0e2c17b2018-07-17 19:27:18 +030057 struct flow_dissector_key_ip enc_ip;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +020058 struct flow_dissector_key_enc_opts enc_opts;
Amritha Nambiar5c722992018-11-12 16:15:55 -080059 struct flow_dissector_key_ports tp_min;
60 struct flow_dissector_key_ports tp_max;
Jiri Pirko77b99002015-05-12 14:56:21 +020061} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
62
63struct fl_flow_mask_range {
64 unsigned short int start;
65 unsigned short int end;
66};
67
68struct fl_flow_mask {
69 struct fl_flow_key key;
70 struct fl_flow_mask_range range;
Amritha Nambiar5c722992018-11-12 16:15:55 -080071 u32 flags;
Paul Blakey05cd2712018-04-30 14:28:30 +030072 struct rhash_head ht_node;
73 struct rhashtable ht;
74 struct rhashtable_params filter_ht_params;
75 struct flow_dissector dissector;
76 struct list_head filters;
Paolo Abeni44a5cd42018-06-21 20:02:16 +020077 struct rcu_work rwork;
Paul Blakey05cd2712018-04-30 14:28:30 +030078 struct list_head list;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +020079 refcount_t refcnt;
Jiri Pirko77b99002015-05-12 14:56:21 +020080};
81
Jiri Pirkob95ec7e2018-07-23 09:23:10 +020082struct fl_flow_tmplt {
83 struct fl_flow_key dummy_key;
84 struct fl_flow_key mask;
85 struct flow_dissector dissector;
86 struct tcf_chain *chain;
87};
88
Jiri Pirko77b99002015-05-12 14:56:21 +020089struct cls_fl_head {
90 struct rhashtable ht;
Vlad Buslov259e60f2019-03-21 15:17:39 +020091 spinlock_t masks_lock; /* Protect masks list */
Paul Blakey05cd2712018-04-30 14:28:30 +030092 struct list_head masks;
Cong Wangaaa908f2018-05-23 15:26:53 -070093 struct rcu_work rwork;
Chris Mic15ab232017-08-30 02:31:58 -040094 struct idr handle_idr;
Jiri Pirko77b99002015-05-12 14:56:21 +020095};
96
97struct cls_fl_filter {
Paul Blakey05cd2712018-04-30 14:28:30 +030098 struct fl_flow_mask *mask;
Jiri Pirko77b99002015-05-12 14:56:21 +020099 struct rhash_head ht_node;
100 struct fl_flow_key mkey;
101 struct tcf_exts exts;
102 struct tcf_result res;
103 struct fl_flow_key key;
104 struct list_head list;
105 u32 handle;
Amir Vadaie69985c2016-06-05 17:11:18 +0300106 u32 flags;
Vlad Buslov86c55362018-09-07 17:22:21 +0300107 u32 in_hw_count;
Cong Wangaaa908f2018-05-23 15:26:53 -0700108 struct rcu_work rwork;
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200109 struct net_device *hw_dev;
Vlad Buslov06177552019-03-21 15:17:35 +0200110 /* Flower classifier is unlocked, which means that its reference counter
111 * can be changed concurrently without any kind of external
112 * synchronization. Use atomic reference counter to be concurrency-safe.
113 */
114 refcount_t refcnt;
Vlad Buslovb2552b82019-03-21 15:17:36 +0200115 bool deleted;
Jiri Pirko77b99002015-05-12 14:56:21 +0200116};
117
Paul Blakey05cd2712018-04-30 14:28:30 +0300118static const struct rhashtable_params mask_ht_params = {
119 .key_offset = offsetof(struct fl_flow_mask, key),
120 .key_len = sizeof(struct fl_flow_key),
121 .head_offset = offsetof(struct fl_flow_mask, ht_node),
122 .automatic_shrinking = true,
123};
124
Jiri Pirko77b99002015-05-12 14:56:21 +0200125static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
126{
127 return mask->range.end - mask->range.start;
128}
129
130static void fl_mask_update_range(struct fl_flow_mask *mask)
131{
132 const u8 *bytes = (const u8 *) &mask->key;
133 size_t size = sizeof(mask->key);
Paul Blakey05cd2712018-04-30 14:28:30 +0300134 size_t i, first = 0, last;
Jiri Pirko77b99002015-05-12 14:56:21 +0200135
Paul Blakey05cd2712018-04-30 14:28:30 +0300136 for (i = 0; i < size; i++) {
Jiri Pirko77b99002015-05-12 14:56:21 +0200137 if (bytes[i]) {
Paul Blakey05cd2712018-04-30 14:28:30 +0300138 first = i;
139 break;
140 }
141 }
142 last = first;
143 for (i = size - 1; i != first; i--) {
144 if (bytes[i]) {
Jiri Pirko77b99002015-05-12 14:56:21 +0200145 last = i;
Paul Blakey05cd2712018-04-30 14:28:30 +0300146 break;
Jiri Pirko77b99002015-05-12 14:56:21 +0200147 }
148 }
149 mask->range.start = rounddown(first, sizeof(long));
150 mask->range.end = roundup(last + 1, sizeof(long));
151}
152
153static void *fl_key_get_start(struct fl_flow_key *key,
154 const struct fl_flow_mask *mask)
155{
156 return (u8 *) key + mask->range.start;
157}
158
159static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
160 struct fl_flow_mask *mask)
161{
162 const long *lkey = fl_key_get_start(key, mask);
163 const long *lmask = fl_key_get_start(&mask->key, mask);
164 long *lmkey = fl_key_get_start(mkey, mask);
165 int i;
166
167 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
168 *lmkey++ = *lkey++ & *lmask++;
169}
170
Jiri Pirkob95ec7e2018-07-23 09:23:10 +0200171static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
172 struct fl_flow_mask *mask)
173{
174 const long *lmask = fl_key_get_start(&mask->key, mask);
175 const long *ltmplt;
176 int i;
177
178 if (!tmplt)
179 return true;
180 ltmplt = fl_key_get_start(&tmplt->mask, mask);
181 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
182 if (~*ltmplt++ & *lmask++)
183 return false;
184 }
185 return true;
186}
187
Jiri Pirko77b99002015-05-12 14:56:21 +0200188static void fl_clear_masked_range(struct fl_flow_key *key,
189 struct fl_flow_mask *mask)
190{
191 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
192}
193
Amritha Nambiar5c722992018-11-12 16:15:55 -0800194static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
195 struct fl_flow_key *key,
196 struct fl_flow_key *mkey)
197{
198 __be16 min_mask, max_mask, min_val, max_val;
199
200 min_mask = htons(filter->mask->key.tp_min.dst);
201 max_mask = htons(filter->mask->key.tp_max.dst);
202 min_val = htons(filter->key.tp_min.dst);
203 max_val = htons(filter->key.tp_max.dst);
204
205 if (min_mask && max_mask) {
206 if (htons(key->tp.dst) < min_val ||
207 htons(key->tp.dst) > max_val)
208 return false;
209
210 /* skb does not have min and max values */
211 mkey->tp_min.dst = filter->mkey.tp_min.dst;
212 mkey->tp_max.dst = filter->mkey.tp_max.dst;
213 }
214 return true;
215}
216
217static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
218 struct fl_flow_key *key,
219 struct fl_flow_key *mkey)
220{
221 __be16 min_mask, max_mask, min_val, max_val;
222
223 min_mask = htons(filter->mask->key.tp_min.src);
224 max_mask = htons(filter->mask->key.tp_max.src);
225 min_val = htons(filter->key.tp_min.src);
226 max_val = htons(filter->key.tp_max.src);
227
228 if (min_mask && max_mask) {
229 if (htons(key->tp.src) < min_val ||
230 htons(key->tp.src) > max_val)
231 return false;
232
233 /* skb does not have min and max values */
234 mkey->tp_min.src = filter->mkey.tp_min.src;
235 mkey->tp_max.src = filter->mkey.tp_max.src;
236 }
237 return true;
238}
239
240static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
241 struct fl_flow_key *mkey)
Paul Blakeya3308d82017-01-16 10:45:13 +0200242{
Paul Blakey05cd2712018-04-30 14:28:30 +0300243 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
244 mask->filter_ht_params);
Paul Blakeya3308d82017-01-16 10:45:13 +0200245}
246
Amritha Nambiar5c722992018-11-12 16:15:55 -0800247static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
248 struct fl_flow_key *mkey,
249 struct fl_flow_key *key)
250{
251 struct cls_fl_filter *filter, *f;
252
253 list_for_each_entry_rcu(filter, &mask->filters, list) {
254 if (!fl_range_port_dst_cmp(filter, key, mkey))
255 continue;
256
257 if (!fl_range_port_src_cmp(filter, key, mkey))
258 continue;
259
260 f = __fl_lookup(mask, mkey);
261 if (f)
262 return f;
263 }
264 return NULL;
265}
266
267static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
268 struct fl_flow_key *mkey,
269 struct fl_flow_key *key)
270{
271 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
272 return fl_lookup_range(mask, mkey, key);
273
274 return __fl_lookup(mask, mkey);
275}
276
Jiri Pirko77b99002015-05-12 14:56:21 +0200277static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
278 struct tcf_result *res)
279{
280 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
281 struct cls_fl_filter *f;
Paul Blakey05cd2712018-04-30 14:28:30 +0300282 struct fl_flow_mask *mask;
Jiri Pirko77b99002015-05-12 14:56:21 +0200283 struct fl_flow_key skb_key;
284 struct fl_flow_key skb_mkey;
285
Paul Blakey05cd2712018-04-30 14:28:30 +0300286 list_for_each_entry_rcu(mask, &head->masks, list) {
287 fl_clear_masked_range(&skb_key, mask);
Amir Vadaie69985c2016-06-05 17:11:18 +0300288
Paul Blakey05cd2712018-04-30 14:28:30 +0300289 skb_key.indev_ifindex = skb->skb_iif;
290 /* skb_flow_dissect() does not set n_proto in case an unknown
291 * protocol, so do it rather here.
292 */
293 skb_key.basic.n_proto = skb->protocol;
294 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
295 skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
Amir Vadaibc3103f2016-09-08 16:23:47 +0300296
Paul Blakey05cd2712018-04-30 14:28:30 +0300297 fl_set_masked_key(&skb_mkey, &skb_key, mask);
Jiri Pirko77b99002015-05-12 14:56:21 +0200298
Amritha Nambiar5c722992018-11-12 16:15:55 -0800299 f = fl_lookup(mask, &skb_mkey, &skb_key);
Paul Blakey05cd2712018-04-30 14:28:30 +0300300 if (f && !tc_skip_sw(f->flags)) {
301 *res = f->res;
302 return tcf_exts_exec(skb, &f->exts, res);
303 }
Jiri Pirko77b99002015-05-12 14:56:21 +0200304 }
305 return -1;
306}
307
308static int fl_init(struct tcf_proto *tp)
309{
310 struct cls_fl_head *head;
311
312 head = kzalloc(sizeof(*head), GFP_KERNEL);
313 if (!head)
314 return -ENOBUFS;
315
Vlad Buslov259e60f2019-03-21 15:17:39 +0200316 spin_lock_init(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +0300317 INIT_LIST_HEAD_RCU(&head->masks);
Jiri Pirko77b99002015-05-12 14:56:21 +0200318 rcu_assign_pointer(tp->root, head);
Chris Mic15ab232017-08-30 02:31:58 -0400319 idr_init(&head->handle_idr);
Jiri Pirko77b99002015-05-12 14:56:21 +0200320
Paul Blakey05cd2712018-04-30 14:28:30 +0300321 return rhashtable_init(&head->ht, &mask_ht_params);
322}
323
Paolo Abeni44a5cd42018-06-21 20:02:16 +0200324static void fl_mask_free(struct fl_flow_mask *mask)
325{
Vlad Buslovf48ef4d2019-03-21 15:17:37 +0200326 WARN_ON(!list_empty(&mask->filters));
Paolo Abeni44a5cd42018-06-21 20:02:16 +0200327 rhashtable_destroy(&mask->ht);
328 kfree(mask);
329}
330
331static void fl_mask_free_work(struct work_struct *work)
332{
333 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
334 struct fl_flow_mask, rwork);
335
336 fl_mask_free(mask);
337}
338
Paul Blakey05cd2712018-04-30 14:28:30 +0300339static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
340 bool async)
341{
Vlad Buslovf48ef4d2019-03-21 15:17:37 +0200342 if (!refcount_dec_and_test(&mask->refcnt))
Paul Blakey05cd2712018-04-30 14:28:30 +0300343 return false;
344
345 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
Vlad Buslov259e60f2019-03-21 15:17:39 +0200346
347 spin_lock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +0300348 list_del_rcu(&mask->list);
Vlad Buslov259e60f2019-03-21 15:17:39 +0200349 spin_unlock(&head->masks_lock);
350
Paul Blakey05cd2712018-04-30 14:28:30 +0300351 if (async)
Paolo Abeni44a5cd42018-06-21 20:02:16 +0200352 tcf_queue_work(&mask->rwork, fl_mask_free_work);
Paul Blakey05cd2712018-04-30 14:28:30 +0300353 else
Paolo Abeni44a5cd42018-06-21 20:02:16 +0200354 fl_mask_free(mask);
Paul Blakey05cd2712018-04-30 14:28:30 +0300355
356 return true;
Jiri Pirko77b99002015-05-12 14:56:21 +0200357}
358
Cong Wang0dadc112017-11-06 13:47:24 -0800359static void __fl_destroy_filter(struct cls_fl_filter *f)
360{
361 tcf_exts_destroy(&f->exts);
362 tcf_exts_put_net(&f->exts);
363 kfree(f);
364}
365
Cong Wang0552c8a2017-10-26 18:24:33 -0700366static void fl_destroy_filter_work(struct work_struct *work)
367{
Cong Wangaaa908f2018-05-23 15:26:53 -0700368 struct cls_fl_filter *f = container_of(to_rcu_work(work),
369 struct cls_fl_filter, rwork);
Cong Wang0552c8a2017-10-26 18:24:33 -0700370
Cong Wang0dadc112017-11-06 13:47:24 -0800371 __fl_destroy_filter(f);
Cong Wang0552c8a2017-10-26 18:24:33 -0700372}
373
Jakub Kicinski1b0f8032018-01-24 12:54:21 -0800374static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200375 bool rtnl_held, struct netlink_ext_ack *extack)
Amir Vadai5b33f482016-03-08 12:42:29 +0200376{
Jiri Pirkode4784c2017-08-07 10:15:32 +0200377 struct tc_cls_flower_offload cls_flower = {};
Jiri Pirko208c0f42017-10-19 15:50:32 +0200378 struct tcf_block *block = tp->chain->block;
Amir Vadai5b33f482016-03-08 12:42:29 +0200379
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200380 if (!rtnl_held)
381 rtnl_lock();
382
Jakub Kicinski1b0f8032018-01-24 12:54:21 -0800383 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
Jiri Pirkode4784c2017-08-07 10:15:32 +0200384 cls_flower.command = TC_CLSFLOWER_DESTROY;
385 cls_flower.cookie = (unsigned long) f;
Amir Vadai5b33f482016-03-08 12:42:29 +0200386
Cong Wangaeb3fec2018-12-11 11:15:46 -0800387 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
Vlad Buslov3d81e712019-03-21 15:17:42 +0200388 spin_lock(&tp->lock);
Jiri Pirkocaa72602018-01-17 11:46:50 +0100389 tcf_block_offload_dec(block, &f->flags);
Vlad Buslov3d81e712019-03-21 15:17:42 +0200390 spin_unlock(&tp->lock);
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200391
392 if (!rtnl_held)
393 rtnl_unlock();
Amir Vadai5b33f482016-03-08 12:42:29 +0200394}
395
Amir Vadaie8eb36c2016-06-13 12:06:39 +0300396static int fl_hw_replace_filter(struct tcf_proto *tp,
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200397 struct cls_fl_filter *f, bool rtnl_held,
Quentin Monnet41002032018-01-19 17:44:43 -0800398 struct netlink_ext_ack *extack)
Amir Vadai5b33f482016-03-08 12:42:29 +0200399{
Jiri Pirkode4784c2017-08-07 10:15:32 +0200400 struct tc_cls_flower_offload cls_flower = {};
Jiri Pirko208c0f42017-10-19 15:50:32 +0200401 struct tcf_block *block = tp->chain->block;
Jiri Pirko717503b2017-10-11 09:41:09 +0200402 bool skip_sw = tc_skip_sw(f->flags);
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200403 int err = 0;
404
405 if (!rtnl_held)
406 rtnl_lock();
Amir Vadai5b33f482016-03-08 12:42:29 +0200407
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +0100408 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200409 if (!cls_flower.rule) {
410 err = -ENOMEM;
411 goto errout;
412 }
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +0100413
Jakub Kicinskiea205942018-01-24 12:54:20 -0800414 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
Jiri Pirkode4784c2017-08-07 10:15:32 +0200415 cls_flower.command = TC_CLSFLOWER_REPLACE;
416 cls_flower.cookie = (unsigned long) f;
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +0100417 cls_flower.rule->match.dissector = &f->mask->dissector;
418 cls_flower.rule->match.mask = &f->mask->key;
419 cls_flower.rule->match.key = &f->mkey;
Amritha Nambiar384c1812017-10-27 02:35:34 -0700420 cls_flower.classid = f->res.classid;
Amir Vadai5b33f482016-03-08 12:42:29 +0200421
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +0100422 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
423 if (err) {
424 kfree(cls_flower.rule);
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200425 if (skip_sw)
Vlad Buslov1f15bb42019-02-12 23:39:06 +0200426 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200427 else
428 err = 0;
429 goto errout;
Pablo Neira Ayuso3a7b6862019-02-02 12:50:46 +0100430 }
431
Cong Wangaeb3fec2018-12-11 11:15:46 -0800432 err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +0100433 kfree(cls_flower.rule);
434
Jiri Pirko717503b2017-10-11 09:41:09 +0200435 if (err < 0) {
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200436 fl_hw_destroy_filter(tp, f, true, NULL);
437 goto errout;
Jiri Pirko717503b2017-10-11 09:41:09 +0200438 } else if (err > 0) {
John Hurley31533cb2018-06-25 14:30:06 -0700439 f->in_hw_count = err;
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200440 err = 0;
Vlad Buslov3d81e712019-03-21 15:17:42 +0200441 spin_lock(&tp->lock);
Jiri Pirkocaa72602018-01-17 11:46:50 +0100442 tcf_block_offload_inc(block, &f->flags);
Vlad Buslov3d81e712019-03-21 15:17:42 +0200443 spin_unlock(&tp->lock);
Jiri Pirko717503b2017-10-11 09:41:09 +0200444 }
445
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200446 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
447 err = -EINVAL;
448 goto errout;
449 }
Jiri Pirko717503b2017-10-11 09:41:09 +0200450
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200451errout:
452 if (!rtnl_held)
453 rtnl_unlock();
454
455 return err;
Amir Vadai5b33f482016-03-08 12:42:29 +0200456}
457
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200458static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
459 bool rtnl_held)
Amir Vadai10cbc682016-05-13 12:55:37 +0000460{
Jiri Pirkode4784c2017-08-07 10:15:32 +0200461 struct tc_cls_flower_offload cls_flower = {};
Jiri Pirko208c0f42017-10-19 15:50:32 +0200462 struct tcf_block *block = tp->chain->block;
Amir Vadai10cbc682016-05-13 12:55:37 +0000463
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200464 if (!rtnl_held)
465 rtnl_lock();
466
Jakub Kicinskiea205942018-01-24 12:54:20 -0800467 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
Jiri Pirkode4784c2017-08-07 10:15:32 +0200468 cls_flower.command = TC_CLSFLOWER_STATS;
469 cls_flower.cookie = (unsigned long) f;
Amritha Nambiar384c1812017-10-27 02:35:34 -0700470 cls_flower.classid = f->res.classid;
Amir Vadai10cbc682016-05-13 12:55:37 +0000471
Cong Wangaeb3fec2018-12-11 11:15:46 -0800472 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
Pablo Neira Ayuso3b1903e2019-02-02 12:50:47 +0100473
474 tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
475 cls_flower.stats.pkts,
476 cls_flower.stats.lastused);
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200477
478 if (!rtnl_held)
479 rtnl_unlock();
Amir Vadai10cbc682016-05-13 12:55:37 +0000480}
481
Vlad Buslove4746192019-03-21 15:17:33 +0200482static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
483{
484 /* Flower classifier only changes root pointer during init and destroy.
485 * Users must obtain reference to tcf_proto instance before calling its
486 * API, so tp->root pointer is protected from concurrent call to
487 * fl_destroy() by reference counting.
488 */
489 return rcu_dereference_raw(tp->root);
490}
491
Vlad Buslov06177552019-03-21 15:17:35 +0200492static void __fl_put(struct cls_fl_filter *f)
493{
494 if (!refcount_dec_and_test(&f->refcnt))
495 return;
496
Vlad Buslovb2552b82019-03-21 15:17:36 +0200497 WARN_ON(!f->deleted);
498
Vlad Buslov06177552019-03-21 15:17:35 +0200499 if (tcf_exts_get_net(&f->exts))
500 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
501 else
502 __fl_destroy_filter(f);
503}
504
505static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
506{
507 struct cls_fl_filter *f;
508
509 rcu_read_lock();
510 f = idr_find(&head->handle_idr, handle);
511 if (f && !refcount_inc_not_zero(&f->refcnt))
512 f = NULL;
513 rcu_read_unlock();
514
515 return f;
516}
517
518static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
519 unsigned long *handle)
520{
521 struct cls_fl_head *head = fl_head_dereference(tp);
522 struct cls_fl_filter *f;
523
524 rcu_read_lock();
525 while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
526 /* don't return filters that are being deleted */
527 if (refcount_inc_not_zero(&f->refcnt))
528 break;
529 ++(*handle);
530 }
531 rcu_read_unlock();
532
533 return f;
534}
535
Vlad Buslovb2552b82019-03-21 15:17:36 +0200536static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200537 bool *last, bool rtnl_held,
538 struct netlink_ext_ack *extack)
Roi Dayan13fa8762016-11-01 16:08:29 +0200539{
Vlad Buslove4746192019-03-21 15:17:33 +0200540 struct cls_fl_head *head = fl_head_dereference(tp);
Paul Blakey05cd2712018-04-30 14:28:30 +0300541 bool async = tcf_exts_get_net(&f->exts);
Chris Mic15ab232017-08-30 02:31:58 -0400542
Vlad Buslovb2552b82019-03-21 15:17:36 +0200543 *last = false;
544
Vlad Buslov3d81e712019-03-21 15:17:42 +0200545 spin_lock(&tp->lock);
546 if (f->deleted) {
547 spin_unlock(&tp->lock);
Vlad Buslovb2552b82019-03-21 15:17:36 +0200548 return -ENOENT;
Vlad Buslov3d81e712019-03-21 15:17:42 +0200549 }
Vlad Buslovb2552b82019-03-21 15:17:36 +0200550
551 f->deleted = true;
552 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
553 f->mask->filter_ht_params);
Matthew Wilcox9c160942017-11-28 09:48:43 -0500554 idr_remove(&head->handle_idr, f->handle);
Roi Dayan13fa8762016-11-01 16:08:29 +0200555 list_del_rcu(&f->list);
Vlad Buslov3d81e712019-03-21 15:17:42 +0200556 spin_unlock(&tp->lock);
557
Vlad Buslovb2552b82019-03-21 15:17:36 +0200558 *last = fl_mask_put(head, f->mask, async);
Hadar Hen Zion79685212016-12-01 14:06:34 +0200559 if (!tc_skip_hw(f->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200560 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
Roi Dayan13fa8762016-11-01 16:08:29 +0200561 tcf_unbind_filter(tp, &f->res);
Vlad Buslov06177552019-03-21 15:17:35 +0200562 __fl_put(f);
Paul Blakey05cd2712018-04-30 14:28:30 +0300563
Vlad Buslovb2552b82019-03-21 15:17:36 +0200564 return 0;
Roi Dayan13fa8762016-11-01 16:08:29 +0200565}
566
Daniel Borkmannd9363772016-11-27 01:18:01 +0100567static void fl_destroy_sleepable(struct work_struct *work)
568{
Cong Wangaaa908f2018-05-23 15:26:53 -0700569 struct cls_fl_head *head = container_of(to_rcu_work(work),
570 struct cls_fl_head,
571 rwork);
Paul Blakeyde9dc652018-06-03 10:06:13 +0300572
573 rhashtable_destroy(&head->ht);
Daniel Borkmannd9363772016-11-27 01:18:01 +0100574 kfree(head);
575 module_put(THIS_MODULE);
576}
577
Vlad Buslov12db03b2019-02-11 10:55:45 +0200578static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
579 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +0200580{
Vlad Buslove4746192019-03-21 15:17:33 +0200581 struct cls_fl_head *head = fl_head_dereference(tp);
Paul Blakey05cd2712018-04-30 14:28:30 +0300582 struct fl_flow_mask *mask, *next_mask;
Jiri Pirko77b99002015-05-12 14:56:21 +0200583 struct cls_fl_filter *f, *next;
Vlad Buslovb2552b82019-03-21 15:17:36 +0200584 bool last;
Jiri Pirko77b99002015-05-12 14:56:21 +0200585
Paul Blakey05cd2712018-04-30 14:28:30 +0300586 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
587 list_for_each_entry_safe(f, next, &mask->filters, list) {
Vlad Buslovc24e43d82019-03-21 15:17:43 +0200588 __fl_delete(tp, f, &last, rtnl_held, extack);
Vlad Buslovb2552b82019-03-21 15:17:36 +0200589 if (last)
Paul Blakey05cd2712018-04-30 14:28:30 +0300590 break;
591 }
592 }
Chris Mic15ab232017-08-30 02:31:58 -0400593 idr_destroy(&head->handle_idr);
Daniel Borkmannd9363772016-11-27 01:18:01 +0100594
595 __module_get(THIS_MODULE);
Cong Wangaaa908f2018-05-23 15:26:53 -0700596 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
Jiri Pirko77b99002015-05-12 14:56:21 +0200597}
598
Vlad Buslov06177552019-03-21 15:17:35 +0200599static void fl_put(struct tcf_proto *tp, void *arg)
600{
601 struct cls_fl_filter *f = arg;
602
603 __fl_put(f);
604}
605
WANG Cong8113c092017-08-04 21:31:43 -0700606static void *fl_get(struct tcf_proto *tp, u32 handle)
Jiri Pirko77b99002015-05-12 14:56:21 +0200607{
Vlad Buslove4746192019-03-21 15:17:33 +0200608 struct cls_fl_head *head = fl_head_dereference(tp);
Jiri Pirko77b99002015-05-12 14:56:21 +0200609
Vlad Buslov06177552019-03-21 15:17:35 +0200610 return __fl_get(head, handle);
Jiri Pirko77b99002015-05-12 14:56:21 +0200611}
612
613static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
614 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
615 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
616 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
617 .len = IFNAMSIZ },
618 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
619 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
620 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
621 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
622 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
623 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
624 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
625 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
626 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
627 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
628 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
629 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
630 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
631 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
632 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
633 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
Jamal Hadi Salimb175c3a2015-06-25 06:55:27 -0400634 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
635 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300636 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
637 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
638 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
Amir Vadaibc3103f2016-09-08 16:23:47 +0300639 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
640 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
641 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
642 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
643 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
644 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
645 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
646 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
647 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
Or Gerlitzaa72d702016-09-15 15:28:22 +0300648 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
649 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
650 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
651 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
Simon Horman5976c5f2016-11-03 13:24:21 +0100652 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
653 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
654 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
655 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +0200656 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
657 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
658 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
659 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200660 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
661 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
Simon Horman7b684882016-12-07 13:48:28 +0100662 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
663 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
664 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
665 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
666 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
667 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
668 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
669 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
Simon Horman99d31322017-01-11 14:05:43 +0100670 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
671 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
672 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
673 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
674 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
675 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
676 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
677 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
678 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
679 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -0400680 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
681 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
682 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
683 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +0200684 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
685 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
Or Gerlitz4d80cc02017-06-01 21:37:38 +0300686 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
687 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
688 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
689 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
Jianbo Liud64efd02018-07-06 05:38:16 +0000690 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
691 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
Or Gerlitz0e2c17b2018-07-17 19:27:18 +0300693 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
694 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
695 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
696 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +0200697 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
698 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
699};
700
701static const struct nla_policy
702enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
703 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
704};
705
706static const struct nla_policy
707geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
708 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
709 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
710 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
711 .len = 128 },
Jiri Pirko77b99002015-05-12 14:56:21 +0200712};
713
714static void fl_set_key_val(struct nlattr **tb,
715 void *val, int val_type,
716 void *mask, int mask_type, int len)
717{
718 if (!tb[val_type])
719 return;
720 memcpy(val, nla_data(tb[val_type]), len);
721 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
722 memset(mask, 0xff, len);
723 else
724 memcpy(mask, nla_data(tb[mask_type]), len);
725}
726
Amritha Nambiar5c722992018-11-12 16:15:55 -0800727static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
728 struct fl_flow_key *mask)
729{
730 fl_set_key_val(tb, &key->tp_min.dst,
731 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
732 TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
733 fl_set_key_val(tb, &key->tp_max.dst,
734 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
735 TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
736 fl_set_key_val(tb, &key->tp_min.src,
737 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
738 TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
739 fl_set_key_val(tb, &key->tp_max.src,
740 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
741 TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
742
743 if ((mask->tp_min.dst && mask->tp_max.dst &&
744 htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
745 (mask->tp_min.src && mask->tp_max.src &&
746 htons(key->tp_max.src) <= htons(key->tp_min.src)))
747 return -EINVAL;
748
749 return 0;
750}
751
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -0400752static int fl_set_key_mpls(struct nlattr **tb,
753 struct flow_dissector_key_mpls *key_val,
754 struct flow_dissector_key_mpls *key_mask)
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -0400755{
756 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
757 key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
758 key_mask->mpls_ttl = MPLS_TTL_MASK;
759 }
760 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -0400761 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
762
763 if (bos & ~MPLS_BOS_MASK)
764 return -EINVAL;
765 key_val->mpls_bos = bos;
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -0400766 key_mask->mpls_bos = MPLS_BOS_MASK;
767 }
768 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -0400769 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
770
771 if (tc & ~MPLS_TC_MASK)
772 return -EINVAL;
773 key_val->mpls_tc = tc;
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -0400774 key_mask->mpls_tc = MPLS_TC_MASK;
775 }
776 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -0400777 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
778
779 if (label & ~MPLS_LABEL_MASK)
780 return -EINVAL;
781 key_val->mpls_label = label;
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -0400782 key_mask->mpls_label = MPLS_LABEL_MASK;
783 }
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -0400784 return 0;
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -0400785}
786
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300787static void fl_set_key_vlan(struct nlattr **tb,
Jianbo Liuaaab0832018-07-06 05:38:13 +0000788 __be16 ethertype,
Jianbo Liud64efd02018-07-06 05:38:16 +0000789 int vlan_id_key, int vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300790 struct flow_dissector_key_vlan *key_val,
791 struct flow_dissector_key_vlan *key_mask)
792{
793#define VLAN_PRIORITY_MASK 0x7
794
Jianbo Liud64efd02018-07-06 05:38:16 +0000795 if (tb[vlan_id_key]) {
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300796 key_val->vlan_id =
Jianbo Liud64efd02018-07-06 05:38:16 +0000797 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300798 key_mask->vlan_id = VLAN_VID_MASK;
799 }
Jianbo Liud64efd02018-07-06 05:38:16 +0000800 if (tb[vlan_prio_key]) {
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300801 key_val->vlan_priority =
Jianbo Liud64efd02018-07-06 05:38:16 +0000802 nla_get_u8(tb[vlan_prio_key]) &
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300803 VLAN_PRIORITY_MASK;
804 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
805 }
Jianbo Liuaaab0832018-07-06 05:38:13 +0000806 key_val->vlan_tpid = ethertype;
807 key_mask->vlan_tpid = cpu_to_be16(~0);
Hadar Hen Zion9399ae92016-08-17 13:36:13 +0300808}
809
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200810static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
811 u32 *dissector_key, u32 *dissector_mask,
812 u32 flower_flag_bit, u32 dissector_flag_bit)
813{
814 if (flower_mask & flower_flag_bit) {
815 *dissector_mask |= dissector_flag_bit;
816 if (flower_key & flower_flag_bit)
817 *dissector_key |= dissector_flag_bit;
818 }
819}
820
Or Gerlitzd9724772016-12-22 14:28:15 +0200821static int fl_set_key_flags(struct nlattr **tb,
822 u32 *flags_key, u32 *flags_mask)
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200823{
824 u32 key, mask;
825
Or Gerlitzd9724772016-12-22 14:28:15 +0200826 /* mask is mandatory for flags */
827 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
828 return -EINVAL;
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200829
830 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
Or Gerlitzd9724772016-12-22 14:28:15 +0200831 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200832
833 *flags_key = 0;
834 *flags_mask = 0;
835
836 fl_set_key_flag(key, mask, flags_key, flags_mask,
837 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
Pieter Jansen van Vuuren459d1532018-03-06 18:11:14 +0100838 fl_set_key_flag(key, mask, flags_key, flags_mask,
839 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
840 FLOW_DIS_FIRST_FRAG);
Or Gerlitzd9724772016-12-22 14:28:15 +0200841
842 return 0;
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +0200843}
844
Or Gerlitz0e2c17b2018-07-17 19:27:18 +0300845static void fl_set_key_ip(struct nlattr **tb, bool encap,
Or Gerlitz4d80cc02017-06-01 21:37:38 +0300846 struct flow_dissector_key_ip *key,
847 struct flow_dissector_key_ip *mask)
848{
Or Gerlitz0e2c17b2018-07-17 19:27:18 +0300849 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
850 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
851 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
852 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
Or Gerlitz4d80cc02017-06-01 21:37:38 +0300853
Or Gerlitz0e2c17b2018-07-17 19:27:18 +0300854 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
855 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
Or Gerlitz4d80cc02017-06-01 21:37:38 +0300856}
857
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +0200858static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
859 int depth, int option_len,
860 struct netlink_ext_ack *extack)
861{
862 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
863 struct nlattr *class = NULL, *type = NULL, *data = NULL;
864 struct geneve_opt *opt;
865 int err, data_len = 0;
866
867 if (option_len > sizeof(struct geneve_opt))
868 data_len = option_len - sizeof(struct geneve_opt);
869
870 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
871 memset(opt, 0xff, option_len);
872 opt->length = data_len / 4;
873 opt->r1 = 0;
874 opt->r2 = 0;
875 opt->r3 = 0;
876
877 /* If no mask has been prodived we assume an exact match. */
878 if (!depth)
879 return sizeof(struct geneve_opt) + data_len;
880
881 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
882 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
883 return -EINVAL;
884 }
885
886 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
887 nla, geneve_opt_policy, extack);
888 if (err < 0)
889 return err;
890
891 /* We are not allowed to omit any of CLASS, TYPE or DATA
892 * fields from the key.
893 */
894 if (!option_len &&
895 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
896 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
897 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
898 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
899 return -EINVAL;
900 }
901
902 /* Omitting any of CLASS, TYPE or DATA fields is allowed
903 * for the mask.
904 */
905 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
906 int new_len = key->enc_opts.len;
907
908 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
909 data_len = nla_len(data);
910 if (data_len < 4) {
911 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
912 return -ERANGE;
913 }
914 if (data_len % 4) {
915 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
916 return -ERANGE;
917 }
918
919 new_len += sizeof(struct geneve_opt) + data_len;
920 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
921 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
922 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
923 return -ERANGE;
924 }
925 opt->length = data_len / 4;
926 memcpy(opt->opt_data, nla_data(data), data_len);
927 }
928
929 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
930 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
931 opt->opt_class = nla_get_be16(class);
932 }
933
934 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
935 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
936 opt->type = nla_get_u8(type);
937 }
938
939 return sizeof(struct geneve_opt) + data_len;
940}
941
942static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
943 struct fl_flow_key *mask,
944 struct netlink_ext_ack *extack)
945{
946 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
Jakub Kicinski63c82992018-11-09 21:06:26 -0800947 int err, option_len, key_depth, msk_depth = 0;
948
949 err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
950 TCA_FLOWER_KEY_ENC_OPTS_MAX,
951 enc_opts_policy, extack);
952 if (err)
953 return err;
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +0200954
955 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
956
957 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
Jakub Kicinski63c82992018-11-09 21:06:26 -0800958 err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
959 TCA_FLOWER_KEY_ENC_OPTS_MAX,
960 enc_opts_policy, extack);
961 if (err)
962 return err;
963
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +0200964 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
965 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
966 }
967
968 nla_for_each_attr(nla_opt_key, nla_enc_key,
969 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
970 switch (nla_type(nla_opt_key)) {
971 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
972 option_len = 0;
973 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
974 option_len = fl_set_geneve_opt(nla_opt_key, key,
975 key_depth, option_len,
976 extack);
977 if (option_len < 0)
978 return option_len;
979
980 key->enc_opts.len += option_len;
981 /* At the same time we need to parse through the mask
982 * in order to verify exact and mask attribute lengths.
983 */
984 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
985 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
986 msk_depth, option_len,
987 extack);
988 if (option_len < 0)
989 return option_len;
990
991 mask->enc_opts.len += option_len;
992 if (key->enc_opts.len != mask->enc_opts.len) {
993 NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
994 return -EINVAL;
995 }
996
997 if (msk_depth)
998 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
999 break;
1000 default:
1001 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1002 return -EINVAL;
1003 }
1004 }
1005
1006 return 0;
1007}
1008
Jiri Pirko77b99002015-05-12 14:56:21 +02001009static int fl_set_key(struct net *net, struct nlattr **tb,
Alexander Aring1057c552018-01-18 11:20:54 -05001010 struct fl_flow_key *key, struct fl_flow_key *mask,
1011 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001012{
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001013 __be16 ethertype;
Or Gerlitzd9724772016-12-22 14:28:15 +02001014 int ret = 0;
Brian Haleydd3aa3b2015-05-14 13:20:15 -04001015#ifdef CONFIG_NET_CLS_IND
Jiri Pirko77b99002015-05-12 14:56:21 +02001016 if (tb[TCA_FLOWER_INDEV]) {
Alexander Aring1057c552018-01-18 11:20:54 -05001017 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001018 if (err < 0)
1019 return err;
1020 key->indev_ifindex = err;
1021 mask->indev_ifindex = 0xffffffff;
1022 }
Brian Haleydd3aa3b2015-05-14 13:20:15 -04001023#endif
Jiri Pirko77b99002015-05-12 14:56:21 +02001024
1025 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1026 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1027 sizeof(key->eth.dst));
1028 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1029 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1030 sizeof(key->eth.src));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001031
Arnd Bergmann0b498a52016-08-26 17:25:45 +02001032 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001033 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1034
Jianbo Liuaaab0832018-07-06 05:38:13 +00001035 if (eth_type_vlan(ethertype)) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001036 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1037 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1038 &mask->vlan);
1039
Jianbo Liu5e9a0fe2018-07-09 02:26:20 +00001040 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1041 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1042 if (eth_type_vlan(ethertype)) {
1043 fl_set_key_vlan(tb, ethertype,
1044 TCA_FLOWER_KEY_CVLAN_ID,
1045 TCA_FLOWER_KEY_CVLAN_PRIO,
1046 &key->cvlan, &mask->cvlan);
1047 fl_set_key_val(tb, &key->basic.n_proto,
1048 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1049 &mask->basic.n_proto,
1050 TCA_FLOWER_UNSPEC,
1051 sizeof(key->basic.n_proto));
1052 } else {
1053 key->basic.n_proto = ethertype;
1054 mask->basic.n_proto = cpu_to_be16(~0);
1055 }
Jianbo Liud64efd02018-07-06 05:38:16 +00001056 }
Arnd Bergmann0b498a52016-08-26 17:25:45 +02001057 } else {
1058 key->basic.n_proto = ethertype;
1059 mask->basic.n_proto = cpu_to_be16(~0);
1060 }
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001061 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001062
Jiri Pirko77b99002015-05-12 14:56:21 +02001063 if (key->basic.n_proto == htons(ETH_P_IP) ||
1064 key->basic.n_proto == htons(ETH_P_IPV6)) {
1065 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1066 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1067 sizeof(key->basic.ip_proto));
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001068 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
Jiri Pirko77b99002015-05-12 14:56:21 +02001069 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001070
1071 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1072 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001073 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001074 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1075 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1076 sizeof(key->ipv4.src));
1077 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1078 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1079 sizeof(key->ipv4.dst));
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001080 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1081 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001082 mask->control.addr_type = ~0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001083 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1084 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1085 sizeof(key->ipv6.src));
1086 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1087 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1088 sizeof(key->ipv6.dst));
1089 }
Jamal Hadi Salim66530bd2016-01-10 11:47:01 -05001090
Jiri Pirko77b99002015-05-12 14:56:21 +02001091 if (key->basic.ip_proto == IPPROTO_TCP) {
1092 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001093 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001094 sizeof(key->tp.src));
1095 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001096 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001097 sizeof(key->tp.dst));
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001098 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1099 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1100 sizeof(key->tcp.flags));
Jiri Pirko77b99002015-05-12 14:56:21 +02001101 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1102 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001103 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001104 sizeof(key->tp.src));
1105 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03001106 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02001107 sizeof(key->tp.dst));
Simon Horman5976c5f2016-11-03 13:24:21 +01001108 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1109 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1110 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1111 sizeof(key->tp.src));
1112 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1113 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1114 sizeof(key->tp.dst));
Simon Horman7b684882016-12-07 13:48:28 +01001115 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1116 key->basic.ip_proto == IPPROTO_ICMP) {
1117 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1118 &mask->icmp.type,
1119 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1120 sizeof(key->icmp.type));
1121 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1122 &mask->icmp.code,
1123 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1124 sizeof(key->icmp.code));
1125 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1126 key->basic.ip_proto == IPPROTO_ICMPV6) {
1127 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1128 &mask->icmp.type,
1129 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1130 sizeof(key->icmp.type));
Simon Horman040587a2017-01-30 16:19:02 +01001131 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
Simon Horman7b684882016-12-07 13:48:28 +01001132 &mask->icmp.code,
Simon Horman040587a2017-01-30 16:19:02 +01001133 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
Simon Horman7b684882016-12-07 13:48:28 +01001134 sizeof(key->icmp.code));
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001135 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1136 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
Benjamin LaHaise1a7fca62017-05-01 09:58:40 -04001137 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1138 if (ret)
1139 return ret;
Simon Horman99d31322017-01-11 14:05:43 +01001140 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1141 key->basic.n_proto == htons(ETH_P_RARP)) {
1142 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1143 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1144 sizeof(key->arp.sip));
1145 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1146 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1147 sizeof(key->arp.tip));
1148 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1149 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1150 sizeof(key->arp.op));
1151 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1152 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1153 sizeof(key->arp.sha));
1154 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1155 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1156 sizeof(key->arp.tha));
Jiri Pirko77b99002015-05-12 14:56:21 +02001157 }
1158
Amritha Nambiar5c722992018-11-12 16:15:55 -08001159 if (key->basic.ip_proto == IPPROTO_TCP ||
1160 key->basic.ip_proto == IPPROTO_UDP ||
1161 key->basic.ip_proto == IPPROTO_SCTP) {
1162 ret = fl_set_key_port_range(tb, key, mask);
1163 if (ret)
1164 return ret;
1165 }
1166
Amir Vadaibc3103f2016-09-08 16:23:47 +03001167 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1168 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1169 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001170 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001171 fl_set_key_val(tb, &key->enc_ipv4.src,
1172 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1173 &mask->enc_ipv4.src,
1174 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1175 sizeof(key->enc_ipv4.src));
1176 fl_set_key_val(tb, &key->enc_ipv4.dst,
1177 TCA_FLOWER_KEY_ENC_IPV4_DST,
1178 &mask->enc_ipv4.dst,
1179 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1180 sizeof(key->enc_ipv4.dst));
1181 }
1182
1183 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1184 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1185 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Paul Blakey970bfcd2016-12-14 19:00:57 +02001186 mask->enc_control.addr_type = ~0;
Amir Vadaibc3103f2016-09-08 16:23:47 +03001187 fl_set_key_val(tb, &key->enc_ipv6.src,
1188 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1189 &mask->enc_ipv6.src,
1190 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1191 sizeof(key->enc_ipv6.src));
1192 fl_set_key_val(tb, &key->enc_ipv6.dst,
1193 TCA_FLOWER_KEY_ENC_IPV6_DST,
1194 &mask->enc_ipv6.dst,
1195 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1196 sizeof(key->enc_ipv6.dst));
1197 }
1198
1199 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03001200 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
Amir Vadaibc3103f2016-09-08 16:23:47 +03001201 sizeof(key->enc_key_id.keyid));
1202
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001203 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1204 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1205 sizeof(key->enc_tp.src));
1206
1207 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1208 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1209 sizeof(key->enc_tp.dst));
1210
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001211 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1212
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001213 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1214 ret = fl_set_enc_opt(tb, key, mask, extack);
1215 if (ret)
1216 return ret;
1217 }
1218
Or Gerlitzd9724772016-12-22 14:28:15 +02001219 if (tb[TCA_FLOWER_KEY_FLAGS])
1220 ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001221
Or Gerlitzd9724772016-12-22 14:28:15 +02001222 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001223}
1224
Paul Blakey05cd2712018-04-30 14:28:30 +03001225static void fl_mask_copy(struct fl_flow_mask *dst,
1226 struct fl_flow_mask *src)
Jiri Pirko77b99002015-05-12 14:56:21 +02001227{
Paul Blakey05cd2712018-04-30 14:28:30 +03001228 const void *psrc = fl_key_get_start(&src->key, src);
1229 void *pdst = fl_key_get_start(&dst->key, src);
Jiri Pirko77b99002015-05-12 14:56:21 +02001230
Paul Blakey05cd2712018-04-30 14:28:30 +03001231 memcpy(pdst, psrc, fl_mask_range(src));
1232 dst->range = src->range;
Jiri Pirko77b99002015-05-12 14:56:21 +02001233}
1234
1235static const struct rhashtable_params fl_ht_params = {
1236 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1237 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1238 .automatic_shrinking = true,
1239};
1240
Paul Blakey05cd2712018-04-30 14:28:30 +03001241static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001242{
Paul Blakey05cd2712018-04-30 14:28:30 +03001243 mask->filter_ht_params = fl_ht_params;
1244 mask->filter_ht_params.key_len = fl_mask_range(mask);
1245 mask->filter_ht_params.key_offset += mask->range.start;
Jiri Pirko77b99002015-05-12 14:56:21 +02001246
Paul Blakey05cd2712018-04-30 14:28:30 +03001247 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
Jiri Pirko77b99002015-05-12 14:56:21 +02001248}
1249
1250#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
zhong jiangcb205a82018-09-19 19:32:11 +08001251#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
Jiri Pirko77b99002015-05-12 14:56:21 +02001252
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001253#define FL_KEY_IS_MASKED(mask, member) \
1254 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1255 0, FL_KEY_MEMBER_SIZE(member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001256
1257#define FL_KEY_SET(keys, cnt, id, member) \
1258 do { \
1259 keys[cnt].key_id = id; \
1260 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1261 cnt++; \
1262 } while(0);
1263
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001264#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001265 do { \
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001266 if (FL_KEY_IS_MASKED(mask, member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001267 FL_KEY_SET(keys, cnt, id, member); \
1268 } while(0);
1269
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001270static void fl_init_dissector(struct flow_dissector *dissector,
1271 struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001272{
1273 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1274 size_t cnt = 0;
1275
Tom Herbert42aecaa2015-06-04 09:16:39 -07001276 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
Jiri Pirko77b99002015-05-12 14:56:21 +02001277 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001278 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001279 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001280 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001281 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001282 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001283 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
Amritha Nambiar5c722992018-11-12 16:15:55 -08001284 if (FL_KEY_IS_MASKED(mask, tp) ||
1285 FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1286 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001287 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001288 FLOW_DISSECTOR_KEY_IP, ip);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001289 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001290 FLOW_DISSECTOR_KEY_TCP, tcp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001291 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman7b684882016-12-07 13:48:28 +01001292 FLOW_DISSECTOR_KEY_ICMP, icmp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001293 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman99d31322017-01-11 14:05:43 +01001294 FLOW_DISSECTOR_KEY_ARP, arp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001295 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001296 FLOW_DISSECTOR_KEY_MPLS, mpls);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001297 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001298 FLOW_DISSECTOR_KEY_VLAN, vlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001299 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jianbo Liud64efd02018-07-06 05:38:16 +00001300 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001301 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001302 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001303 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001304 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001305 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001306 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001307 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1308 FL_KEY_IS_MASKED(mask, enc_ipv6))
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001309 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1310 enc_control);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001311 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001312 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001313 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001314 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001315 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1316 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
Jiri Pirko77b99002015-05-12 14:56:21 +02001317
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001318 skb_flow_dissector_init(dissector, keys, cnt);
Paul Blakey05cd2712018-04-30 14:28:30 +03001319}
1320
1321static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1322 struct fl_flow_mask *mask)
1323{
1324 struct fl_flow_mask *newmask;
1325 int err;
1326
1327 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1328 if (!newmask)
1329 return ERR_PTR(-ENOMEM);
1330
1331 fl_mask_copy(newmask, mask);
1332
Amritha Nambiar5c722992018-11-12 16:15:55 -08001333 if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1334 (newmask->key.tp_min.src && newmask->key.tp_max.src))
1335 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1336
Paul Blakey05cd2712018-04-30 14:28:30 +03001337 err = fl_init_mask_hashtable(newmask);
1338 if (err)
1339 goto errout_free;
1340
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001341 fl_init_dissector(&newmask->dissector, &newmask->key);
Paul Blakey05cd2712018-04-30 14:28:30 +03001342
1343 INIT_LIST_HEAD_RCU(&newmask->filters);
1344
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001345 refcount_set(&newmask->refcnt, 1);
Vlad Buslov195c2342019-03-21 15:17:38 +02001346 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1347 &newmask->ht_node, mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001348 if (err)
1349 goto errout_destroy;
1350
Vlad Buslov195c2342019-03-21 15:17:38 +02001351 /* Wait until any potential concurrent users of mask are finished */
1352 synchronize_rcu();
1353
Vlad Buslov259e60f2019-03-21 15:17:39 +02001354 spin_lock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001355 list_add_tail_rcu(&newmask->list, &head->masks);
Vlad Buslov259e60f2019-03-21 15:17:39 +02001356 spin_unlock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001357
1358 return newmask;
1359
1360errout_destroy:
1361 rhashtable_destroy(&newmask->ht);
1362errout_free:
1363 kfree(newmask);
1364
1365 return ERR_PTR(err);
Jiri Pirko77b99002015-05-12 14:56:21 +02001366}
1367
1368static int fl_check_assign_mask(struct cls_fl_head *head,
Paul Blakey05cd2712018-04-30 14:28:30 +03001369 struct cls_fl_filter *fnew,
1370 struct cls_fl_filter *fold,
Jiri Pirko77b99002015-05-12 14:56:21 +02001371 struct fl_flow_mask *mask)
1372{
Paul Blakey05cd2712018-04-30 14:28:30 +03001373 struct fl_flow_mask *newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001374 int ret = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001375
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001376 rcu_read_lock();
Vlad Buslov195c2342019-03-21 15:17:38 +02001377
1378 /* Insert mask as temporary node to prevent concurrent creation of mask
1379 * with same key. Any concurrent lookups with same key will return
1380 * -EAGAIN because mask's refcnt is zero. It is safe to insert
1381 * stack-allocated 'mask' to masks hash table because we call
1382 * synchronize_rcu() before returning from this function (either in case
1383 * of error or after replacing it with heap-allocated mask in
1384 * fl_create_new_mask()).
1385 */
1386 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1387 &mask->ht_node,
1388 mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001389 if (!fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001390 rcu_read_unlock();
1391
Vlad Buslov195c2342019-03-21 15:17:38 +02001392 if (fold) {
1393 ret = -EINVAL;
1394 goto errout_cleanup;
1395 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001396
1397 newmask = fl_create_new_mask(head, mask);
Vlad Buslov195c2342019-03-21 15:17:38 +02001398 if (IS_ERR(newmask)) {
1399 ret = PTR_ERR(newmask);
1400 goto errout_cleanup;
1401 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001402
1403 fnew->mask = newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001404 return 0;
Vlad Buslov195c2342019-03-21 15:17:38 +02001405 } else if (IS_ERR(fnew->mask)) {
1406 ret = PTR_ERR(fnew->mask);
Paul Blakeyf6521c52018-06-03 10:06:14 +03001407 } else if (fold && fold->mask != fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001408 ret = -EINVAL;
1409 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1410 /* Mask was deleted concurrently, try again */
1411 ret = -EAGAIN;
Jiri Pirko77b99002015-05-12 14:56:21 +02001412 }
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001413 rcu_read_unlock();
1414 return ret;
Vlad Buslov195c2342019-03-21 15:17:38 +02001415
1416errout_cleanup:
1417 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1418 mask_ht_params);
1419 /* Wait until any potential concurrent users of mask are finished */
1420 synchronize_rcu();
1421 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001422}
1423
1424static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1425 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1426 unsigned long base, struct nlattr **tb,
Alexander Aring50a56192018-01-18 11:20:52 -05001427 struct nlattr *est, bool ovr,
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001428 struct fl_flow_tmplt *tmplt, bool rtnl_held,
Alexander Aring50a56192018-01-18 11:20:52 -05001429 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001430{
Jiri Pirko77b99002015-05-12 14:56:21 +02001431 int err;
1432
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001433 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
Vlad Buslovec6743a2019-02-11 10:55:43 +02001434 extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001435 if (err < 0)
1436 return err;
1437
1438 if (tb[TCA_FLOWER_CLASSID]) {
1439 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001440 if (!rtnl_held)
1441 rtnl_lock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001442 tcf_bind_filter(tp, &f->res, base);
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001443 if (!rtnl_held)
1444 rtnl_unlock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001445 }
1446
Alexander Aring1057c552018-01-18 11:20:54 -05001447 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001448 if (err)
Jiri Pirko45507522017-08-04 14:29:06 +02001449 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001450
1451 fl_mask_update_range(mask);
1452 fl_set_masked_key(&f->mkey, &f->key, mask);
1453
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001454 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1455 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1456 return -EINVAL;
1457 }
1458
Jiri Pirko77b99002015-05-12 14:56:21 +02001459 return 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001460}
1461
Vlad Buslov1f17f772019-04-05 20:56:26 +03001462static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1463 struct cls_fl_filter *fold,
1464 bool *in_ht)
1465{
1466 struct fl_flow_mask *mask = fnew->mask;
1467 int err;
1468
Vlad Buslov9e355522019-04-11 19:12:20 +03001469 err = rhashtable_lookup_insert_fast(&mask->ht,
1470 &fnew->ht_node,
1471 mask->filter_ht_params);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001472 if (err) {
1473 *in_ht = false;
1474 /* It is okay if filter with same key exists when
1475 * overwriting.
1476 */
1477 return fold && err == -EEXIST ? 0 : err;
1478 }
1479
1480 *in_ht = true;
1481 return 0;
1482}
1483
Jiri Pirko77b99002015-05-12 14:56:21 +02001484static int fl_change(struct net *net, struct sk_buff *in_skb,
1485 struct tcf_proto *tp, unsigned long base,
1486 u32 handle, struct nlattr **tca,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001487 void **arg, bool ovr, bool rtnl_held,
1488 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001489{
Vlad Buslove4746192019-03-21 15:17:33 +02001490 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07001491 struct cls_fl_filter *fold = *arg;
Jiri Pirko77b99002015-05-12 14:56:21 +02001492 struct cls_fl_filter *fnew;
Ivan Vecera2cddd202019-01-16 16:53:52 +01001493 struct fl_flow_mask *mask;
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001494 struct nlattr **tb;
Vlad Buslov1f17f772019-04-05 20:56:26 +03001495 bool in_ht;
Jiri Pirko77b99002015-05-12 14:56:21 +02001496 int err;
1497
Vlad Buslov06177552019-03-21 15:17:35 +02001498 if (!tca[TCA_OPTIONS]) {
1499 err = -EINVAL;
1500 goto errout_fold;
1501 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001502
Ivan Vecera2cddd202019-01-16 16:53:52 +01001503 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
Vlad Buslov06177552019-03-21 15:17:35 +02001504 if (!mask) {
1505 err = -ENOBUFS;
1506 goto errout_fold;
1507 }
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001508
Ivan Vecera2cddd202019-01-16 16:53:52 +01001509 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1510 if (!tb) {
1511 err = -ENOBUFS;
1512 goto errout_mask_alloc;
1513 }
1514
Johannes Bergfceb6432017-04-12 14:34:07 +02001515 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1516 fl_policy, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001517 if (err < 0)
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001518 goto errout_tb;
Jiri Pirko77b99002015-05-12 14:56:21 +02001519
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001520 if (fold && handle && fold->handle != handle) {
1521 err = -EINVAL;
1522 goto errout_tb;
1523 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001524
1525 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001526 if (!fnew) {
1527 err = -ENOBUFS;
1528 goto errout_tb;
1529 }
Vlad Buslov06177552019-03-21 15:17:35 +02001530 refcount_set(&fnew->refcnt, 1);
Jiri Pirko77b99002015-05-12 14:56:21 +02001531
Cong Wang14215102019-02-20 21:37:42 -08001532 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
WANG Congb9a24bb2016-08-19 12:36:54 -07001533 if (err < 0)
1534 goto errout;
Jiri Pirko77b99002015-05-12 14:56:21 +02001535
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001536 if (tb[TCA_FLOWER_FLAGS]) {
1537 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1538
1539 if (!tc_flags_valid(fnew->flags)) {
1540 err = -EINVAL;
1541 goto errout;
1542 }
1543 }
1544
1545 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001546 tp->chain->tmplt_priv, rtnl_held, extack);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001547 if (err)
1548 goto errout;
1549
1550 err = fl_check_assign_mask(head, fnew, fold, mask);
1551 if (err)
1552 goto errout;
1553
Vlad Buslov1f17f772019-04-05 20:56:26 +03001554 err = fl_ht_insert_unique(fnew, fold, &in_ht);
1555 if (err)
1556 goto errout_mask;
1557
Hadar Hen Zion79685212016-12-01 14:06:34 +02001558 if (!tc_skip_hw(fnew->flags)) {
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001559 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
Hadar Hen Zion79685212016-12-01 14:06:34 +02001560 if (err)
Vlad Buslov1f17f772019-04-05 20:56:26 +03001561 goto errout_ht;
Hadar Hen Zion79685212016-12-01 14:06:34 +02001562 }
Amir Vadai5b33f482016-03-08 12:42:29 +02001563
Or Gerlitz55593962017-02-16 10:31:13 +02001564 if (!tc_in_hw(fnew->flags))
1565 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1566
Vlad Buslov3d81e712019-03-21 15:17:42 +02001567 spin_lock(&tp->lock);
1568
Vlad Buslov272ffaa2019-03-21 15:17:41 +02001569 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1570 * proto again or create new one, if necessary.
1571 */
1572 if (tp->deleting) {
1573 err = -EAGAIN;
1574 goto errout_hw;
1575 }
1576
Vlad Buslov06177552019-03-21 15:17:35 +02001577 refcount_inc(&fnew->refcnt);
Amir Vadai5b33f482016-03-08 12:42:29 +02001578 if (fold) {
Vlad Buslovb2552b82019-03-21 15:17:36 +02001579 /* Fold filter was deleted concurrently. Retry lookup. */
1580 if (fold->deleted) {
1581 err = -EAGAIN;
1582 goto errout_hw;
1583 }
1584
Vlad Buslov620da482019-03-21 15:17:34 +02001585 fnew->handle = handle;
1586
Vlad Buslov1f17f772019-04-05 20:56:26 +03001587 if (!in_ht) {
1588 struct rhashtable_params params =
1589 fnew->mask->filter_ht_params;
1590
1591 err = rhashtable_insert_fast(&fnew->mask->ht,
1592 &fnew->ht_node,
1593 params);
1594 if (err)
1595 goto errout_hw;
1596 in_ht = true;
1597 }
Vlad Buslov620da482019-03-21 15:17:34 +02001598
Roi Dayan599d2572018-12-19 18:07:56 +02001599 rhashtable_remove_fast(&fold->mask->ht,
1600 &fold->ht_node,
1601 fold->mask->filter_ht_params);
Matthew Wilcox234a4622017-11-28 09:56:36 -05001602 idr_replace(&head->handle_idr, fnew, fnew->handle);
Daniel Borkmannff3532f2015-07-17 22:38:44 +02001603 list_replace_rcu(&fold->list, &fnew->list);
Vlad Buslovb2552b82019-03-21 15:17:36 +02001604 fold->deleted = true;
Vlad Buslov620da482019-03-21 15:17:34 +02001605
Vlad Buslov3d81e712019-03-21 15:17:42 +02001606 spin_unlock(&tp->lock);
1607
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001608 fl_mask_put(head, fold->mask, true);
Vlad Buslov620da482019-03-21 15:17:34 +02001609 if (!tc_skip_hw(fold->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001610 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001611 tcf_unbind_filter(tp, &fold->res);
Cong Wang0dadc112017-11-06 13:47:24 -08001612 tcf_exts_get_net(&fold->exts);
Vlad Buslov06177552019-03-21 15:17:35 +02001613 /* Caller holds reference to fold, so refcnt is always > 0
1614 * after this.
1615 */
1616 refcount_dec(&fold->refcnt);
1617 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02001618 } else {
Vlad Buslov620da482019-03-21 15:17:34 +02001619 if (handle) {
1620 /* user specifies a handle and it doesn't exist */
1621 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1622 handle, GFP_ATOMIC);
Vlad Buslov9a2d9382019-03-21 15:17:40 +02001623
1624 /* Filter with specified handle was concurrently
1625 * inserted after initial check in cls_api. This is not
1626 * necessarily an error if NLM_F_EXCL is not set in
1627 * message flags. Returning EAGAIN will cause cls_api to
1628 * try to update concurrently inserted rule.
1629 */
1630 if (err == -ENOSPC)
1631 err = -EAGAIN;
Vlad Buslov620da482019-03-21 15:17:34 +02001632 } else {
1633 handle = 1;
1634 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1635 INT_MAX, GFP_ATOMIC);
1636 }
1637 if (err)
1638 goto errout_hw;
1639
1640 fnew->handle = handle;
Paul Blakey05cd2712018-04-30 14:28:30 +03001641 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
Vlad Buslov3d81e712019-03-21 15:17:42 +02001642 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02001643 }
1644
Vlad Buslov620da482019-03-21 15:17:34 +02001645 *arg = fnew;
1646
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001647 kfree(tb);
Ivan Vecera2cddd202019-01-16 16:53:52 +01001648 kfree(mask);
Jiri Pirko77b99002015-05-12 14:56:21 +02001649 return 0;
1650
Vlad Buslov620da482019-03-21 15:17:34 +02001651errout_hw:
Vlad Buslov3d81e712019-03-21 15:17:42 +02001652 spin_unlock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02001653 if (!tc_skip_hw(fnew->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001654 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001655errout_ht:
1656 if (in_ht)
1657 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1658 fnew->mask->filter_ht_params);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001659errout_mask:
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001660 fl_mask_put(head, fnew->mask, true);
Jiri Pirko77b99002015-05-12 14:56:21 +02001661errout:
Vlad Buslov1f17f772019-04-05 20:56:26 +03001662 tcf_queue_work(&fnew->rwork, fl_destroy_filter_work);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001663errout_tb:
1664 kfree(tb);
Ivan Vecera2cddd202019-01-16 16:53:52 +01001665errout_mask_alloc:
1666 kfree(mask);
Vlad Buslov06177552019-03-21 15:17:35 +02001667errout_fold:
1668 if (fold)
1669 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02001670 return err;
1671}
1672
Alexander Aring571acf22018-01-18 11:20:53 -05001673static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001674 bool rtnl_held, struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001675{
Vlad Buslove4746192019-03-21 15:17:33 +02001676 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07001677 struct cls_fl_filter *f = arg;
Vlad Buslovb2552b82019-03-21 15:17:36 +02001678 bool last_on_mask;
1679 int err = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001680
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001681 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
Paul Blakey05cd2712018-04-30 14:28:30 +03001682 *last = list_empty(&head->masks);
Vlad Buslov06177552019-03-21 15:17:35 +02001683 __fl_put(f);
1684
Vlad Buslovb2552b82019-03-21 15:17:36 +02001685 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001686}
1687
Vlad Buslov12db03b2019-02-11 10:55:45 +02001688static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1689 bool rtnl_held)
Jiri Pirko77b99002015-05-12 14:56:21 +02001690{
Jiri Pirko77b99002015-05-12 14:56:21 +02001691 struct cls_fl_filter *f;
1692
Vlad Buslov01683a12018-07-09 13:29:11 +03001693 arg->count = arg->skip;
1694
Vlad Buslov06177552019-03-21 15:17:35 +02001695 while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
Vlad Buslov01683a12018-07-09 13:29:11 +03001696 if (arg->fn(tp, f, arg) < 0) {
Vlad Buslov06177552019-03-21 15:17:35 +02001697 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03001698 arg->stop = 1;
1699 break;
Paul Blakey05cd2712018-04-30 14:28:30 +03001700 }
Vlad Buslov06177552019-03-21 15:17:35 +02001701 __fl_put(f);
1702 arg->cookie++;
Vlad Buslov01683a12018-07-09 13:29:11 +03001703 arg->count++;
Jiri Pirko77b99002015-05-12 14:56:21 +02001704 }
1705}
1706
John Hurley31533cb2018-06-25 14:30:06 -07001707static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
1708 void *cb_priv, struct netlink_ext_ack *extack)
1709{
John Hurley31533cb2018-06-25 14:30:06 -07001710 struct tc_cls_flower_offload cls_flower = {};
1711 struct tcf_block *block = tp->chain->block;
John Hurley95e27a42019-04-02 23:53:20 +01001712 unsigned long handle = 0;
John Hurley31533cb2018-06-25 14:30:06 -07001713 struct cls_fl_filter *f;
1714 int err;
1715
John Hurley95e27a42019-04-02 23:53:20 +01001716 while ((f = fl_get_next_filter(tp, &handle))) {
1717 if (tc_skip_hw(f->flags))
1718 goto next_flow;
John Hurley31533cb2018-06-25 14:30:06 -07001719
John Hurley95e27a42019-04-02 23:53:20 +01001720 cls_flower.rule =
1721 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1722 if (!cls_flower.rule) {
1723 __fl_put(f);
1724 return -ENOMEM;
John Hurley31533cb2018-06-25 14:30:06 -07001725 }
John Hurley95e27a42019-04-02 23:53:20 +01001726
1727 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1728 extack);
1729 cls_flower.command = add ?
1730 TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
1731 cls_flower.cookie = (unsigned long)f;
1732 cls_flower.rule->match.dissector = &f->mask->dissector;
1733 cls_flower.rule->match.mask = &f->mask->key;
1734 cls_flower.rule->match.key = &f->mkey;
1735
1736 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
1737 if (err) {
1738 kfree(cls_flower.rule);
1739 if (tc_skip_sw(f->flags)) {
1740 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1741 __fl_put(f);
1742 return err;
1743 }
1744 goto next_flow;
1745 }
1746
1747 cls_flower.classid = f->res.classid;
1748
1749 err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
1750 kfree(cls_flower.rule);
1751
1752 if (err) {
1753 if (add && tc_skip_sw(f->flags)) {
1754 __fl_put(f);
1755 return err;
1756 }
1757 goto next_flow;
1758 }
1759
1760 spin_lock(&tp->lock);
1761 tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
1762 add);
1763 spin_unlock(&tp->lock);
1764next_flow:
1765 handle++;
1766 __fl_put(f);
John Hurley31533cb2018-06-25 14:30:06 -07001767 }
1768
1769 return 0;
1770}
1771
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001772static int fl_hw_create_tmplt(struct tcf_chain *chain,
1773 struct fl_flow_tmplt *tmplt)
Jiri Pirko34738452018-07-23 09:23:11 +02001774{
1775 struct tc_cls_flower_offload cls_flower = {};
1776 struct tcf_block *block = chain->block;
Jiri Pirko34738452018-07-23 09:23:11 +02001777
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01001778 cls_flower.rule = flow_rule_alloc(0);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001779 if (!cls_flower.rule)
1780 return -ENOMEM;
1781
Jiri Pirko34738452018-07-23 09:23:11 +02001782 cls_flower.common.chain_index = chain->index;
1783 cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
1784 cls_flower.cookie = (unsigned long) tmplt;
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001785 cls_flower.rule->match.dissector = &tmplt->dissector;
1786 cls_flower.rule->match.mask = &tmplt->mask;
1787 cls_flower.rule->match.key = &tmplt->dummy_key;
Jiri Pirko34738452018-07-23 09:23:11 +02001788
1789 /* We don't care if driver (any of them) fails to handle this
1790 * call. It serves just as a hint for it.
1791 */
Cong Wangaeb3fec2018-12-11 11:15:46 -08001792 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001793 kfree(cls_flower.rule);
1794
1795 return 0;
Jiri Pirko34738452018-07-23 09:23:11 +02001796}
1797
1798static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1799 struct fl_flow_tmplt *tmplt)
1800{
1801 struct tc_cls_flower_offload cls_flower = {};
1802 struct tcf_block *block = chain->block;
1803
1804 cls_flower.common.chain_index = chain->index;
1805 cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
1806 cls_flower.cookie = (unsigned long) tmplt;
1807
Cong Wangaeb3fec2018-12-11 11:15:46 -08001808 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
Jiri Pirko34738452018-07-23 09:23:11 +02001809}
1810
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001811static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1812 struct nlattr **tca,
1813 struct netlink_ext_ack *extack)
1814{
1815 struct fl_flow_tmplt *tmplt;
1816 struct nlattr **tb;
1817 int err;
1818
1819 if (!tca[TCA_OPTIONS])
1820 return ERR_PTR(-EINVAL);
1821
1822 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1823 if (!tb)
1824 return ERR_PTR(-ENOBUFS);
1825 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1826 fl_policy, NULL);
1827 if (err)
1828 goto errout_tb;
1829
1830 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03001831 if (!tmplt) {
1832 err = -ENOMEM;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001833 goto errout_tb;
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03001834 }
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001835 tmplt->chain = chain;
1836 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1837 if (err)
1838 goto errout_tmplt;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001839
1840 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1841
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001842 err = fl_hw_create_tmplt(chain, tmplt);
1843 if (err)
1844 goto errout_tmplt;
Jiri Pirko34738452018-07-23 09:23:11 +02001845
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001846 kfree(tb);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001847 return tmplt;
1848
1849errout_tmplt:
1850 kfree(tmplt);
1851errout_tb:
1852 kfree(tb);
1853 return ERR_PTR(err);
1854}
1855
1856static void fl_tmplt_destroy(void *tmplt_priv)
1857{
1858 struct fl_flow_tmplt *tmplt = tmplt_priv;
1859
Cong Wang95278dd2018-10-02 12:50:19 -07001860 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1861 kfree(tmplt);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001862}
1863
Jiri Pirko77b99002015-05-12 14:56:21 +02001864static int fl_dump_key_val(struct sk_buff *skb,
1865 void *val, int val_type,
1866 void *mask, int mask_type, int len)
1867{
1868 int err;
1869
1870 if (!memchr_inv(mask, 0, len))
1871 return 0;
1872 err = nla_put(skb, val_type, len, val);
1873 if (err)
1874 return err;
1875 if (mask_type != TCA_FLOWER_UNSPEC) {
1876 err = nla_put(skb, mask_type, len, mask);
1877 if (err)
1878 return err;
1879 }
1880 return 0;
1881}
1882
Amritha Nambiar5c722992018-11-12 16:15:55 -08001883static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1884 struct fl_flow_key *mask)
1885{
1886 if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1887 &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1888 sizeof(key->tp_min.dst)) ||
1889 fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1890 &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1891 sizeof(key->tp_max.dst)) ||
1892 fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1893 &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1894 sizeof(key->tp_min.src)) ||
1895 fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1896 &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1897 sizeof(key->tp_max.src)))
1898 return -1;
1899
1900 return 0;
1901}
1902
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001903static int fl_dump_key_mpls(struct sk_buff *skb,
1904 struct flow_dissector_key_mpls *mpls_key,
1905 struct flow_dissector_key_mpls *mpls_mask)
1906{
1907 int err;
1908
1909 if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1910 return 0;
1911 if (mpls_mask->mpls_ttl) {
1912 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1913 mpls_key->mpls_ttl);
1914 if (err)
1915 return err;
1916 }
1917 if (mpls_mask->mpls_tc) {
1918 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1919 mpls_key->mpls_tc);
1920 if (err)
1921 return err;
1922 }
1923 if (mpls_mask->mpls_label) {
1924 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1925 mpls_key->mpls_label);
1926 if (err)
1927 return err;
1928 }
1929 if (mpls_mask->mpls_bos) {
1930 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1931 mpls_key->mpls_bos);
1932 if (err)
1933 return err;
1934 }
1935 return 0;
1936}
1937
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001938static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001939 struct flow_dissector_key_ip *key,
1940 struct flow_dissector_key_ip *mask)
1941{
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001942 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1943 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1944 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1945 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1946
1947 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
1948 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001949 return -1;
1950
1951 return 0;
1952}
1953
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001954static int fl_dump_key_vlan(struct sk_buff *skb,
Jianbo Liud64efd02018-07-06 05:38:16 +00001955 int vlan_id_key, int vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001956 struct flow_dissector_key_vlan *vlan_key,
1957 struct flow_dissector_key_vlan *vlan_mask)
1958{
1959 int err;
1960
1961 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
1962 return 0;
1963 if (vlan_mask->vlan_id) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001964 err = nla_put_u16(skb, vlan_id_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001965 vlan_key->vlan_id);
1966 if (err)
1967 return err;
1968 }
1969 if (vlan_mask->vlan_priority) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001970 err = nla_put_u8(skb, vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001971 vlan_key->vlan_priority);
1972 if (err)
1973 return err;
1974 }
1975 return 0;
1976}
1977
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02001978static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
1979 u32 *flower_key, u32 *flower_mask,
1980 u32 flower_flag_bit, u32 dissector_flag_bit)
1981{
1982 if (dissector_mask & dissector_flag_bit) {
1983 *flower_mask |= flower_flag_bit;
1984 if (dissector_key & dissector_flag_bit)
1985 *flower_key |= flower_flag_bit;
1986 }
1987}
1988
1989static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
1990{
1991 u32 key, mask;
1992 __be32 _key, _mask;
1993 int err;
1994
1995 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
1996 return 0;
1997
1998 key = 0;
1999 mask = 0;
2000
2001 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2002 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
Pieter Jansen van Vuuren459d1532018-03-06 18:11:14 +01002003 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2004 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2005 FLOW_DIS_FIRST_FRAG);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002006
2007 _key = cpu_to_be32(key);
2008 _mask = cpu_to_be32(mask);
2009
2010 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2011 if (err)
2012 return err;
2013
2014 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2015}
2016
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002017static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2018 struct flow_dissector_key_enc_opts *enc_opts)
2019{
2020 struct geneve_opt *opt;
2021 struct nlattr *nest;
2022 int opt_off = 0;
2023
2024 nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2025 if (!nest)
2026 goto nla_put_failure;
2027
2028 while (enc_opts->len > opt_off) {
2029 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2030
2031 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2032 opt->opt_class))
2033 goto nla_put_failure;
2034 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2035 opt->type))
2036 goto nla_put_failure;
2037 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2038 opt->length * 4, opt->opt_data))
2039 goto nla_put_failure;
2040
2041 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2042 }
2043 nla_nest_end(skb, nest);
2044 return 0;
2045
2046nla_put_failure:
2047 nla_nest_cancel(skb, nest);
2048 return -EMSGSIZE;
2049}
2050
2051static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2052 struct flow_dissector_key_enc_opts *enc_opts)
2053{
2054 struct nlattr *nest;
2055 int err;
2056
2057 if (!enc_opts->len)
2058 return 0;
2059
2060 nest = nla_nest_start(skb, enc_opt_type);
2061 if (!nest)
2062 goto nla_put_failure;
2063
2064 switch (enc_opts->dst_opt_type) {
2065 case TUNNEL_GENEVE_OPT:
2066 err = fl_dump_key_geneve_opt(skb, enc_opts);
2067 if (err)
2068 goto nla_put_failure;
2069 break;
2070 default:
2071 goto nla_put_failure;
2072 }
2073 nla_nest_end(skb, nest);
2074 return 0;
2075
2076nla_put_failure:
2077 nla_nest_cancel(skb, nest);
2078 return -EMSGSIZE;
2079}
2080
2081static int fl_dump_key_enc_opt(struct sk_buff *skb,
2082 struct flow_dissector_key_enc_opts *key_opts,
2083 struct flow_dissector_key_enc_opts *msk_opts)
2084{
2085 int err;
2086
2087 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2088 if (err)
2089 return err;
2090
2091 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2092}
2093
Jiri Pirkof5749082018-07-23 09:23:08 +02002094static int fl_dump_key(struct sk_buff *skb, struct net *net,
2095 struct fl_flow_key *key, struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02002096{
Jiri Pirko77b99002015-05-12 14:56:21 +02002097 if (mask->indev_ifindex) {
2098 struct net_device *dev;
2099
2100 dev = __dev_get_by_index(net, key->indev_ifindex);
2101 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2102 goto nla_put_failure;
2103 }
2104
2105 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2106 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2107 sizeof(key->eth.dst)) ||
2108 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2109 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2110 sizeof(key->eth.src)) ||
2111 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2112 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2113 sizeof(key->basic.n_proto)))
2114 goto nla_put_failure;
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002115
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002116 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2117 goto nla_put_failure;
2118
Jianbo Liud64efd02018-07-06 05:38:16 +00002119 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2120 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002121 goto nla_put_failure;
2122
Jianbo Liud64efd02018-07-06 05:38:16 +00002123 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2124 TCA_FLOWER_KEY_CVLAN_PRIO,
2125 &key->cvlan, &mask->cvlan) ||
2126 (mask->cvlan.vlan_tpid &&
Jianbo Liu158abbf2018-07-25 02:31:25 +00002127 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2128 key->cvlan.vlan_tpid)))
Jianbo Liud3069512018-07-06 05:38:15 +00002129 goto nla_put_failure;
2130
Jianbo Liu5e9a0fe2018-07-09 02:26:20 +00002131 if (mask->basic.n_proto) {
2132 if (mask->cvlan.vlan_tpid) {
2133 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2134 key->basic.n_proto))
2135 goto nla_put_failure;
2136 } else if (mask->vlan.vlan_tpid) {
2137 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2138 key->basic.n_proto))
2139 goto nla_put_failure;
2140 }
Jianbo Liud64efd02018-07-06 05:38:16 +00002141 }
2142
Jiri Pirko77b99002015-05-12 14:56:21 +02002143 if ((key->basic.n_proto == htons(ETH_P_IP) ||
2144 key->basic.n_proto == htons(ETH_P_IPV6)) &&
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002145 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
Jiri Pirko77b99002015-05-12 14:56:21 +02002146 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002147 sizeof(key->basic.ip_proto)) ||
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002148 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
Jiri Pirko77b99002015-05-12 14:56:21 +02002149 goto nla_put_failure;
2150
Tom Herbertc3f83242015-06-04 09:16:40 -07002151 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02002152 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2153 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2154 sizeof(key->ipv4.src)) ||
2155 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2156 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2157 sizeof(key->ipv4.dst))))
2158 goto nla_put_failure;
Tom Herbertc3f83242015-06-04 09:16:40 -07002159 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02002160 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2161 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2162 sizeof(key->ipv6.src)) ||
2163 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2164 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2165 sizeof(key->ipv6.dst))))
2166 goto nla_put_failure;
2167
2168 if (key->basic.ip_proto == IPPROTO_TCP &&
2169 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002170 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002171 sizeof(key->tp.src)) ||
2172 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002173 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02002174 sizeof(key->tp.dst)) ||
2175 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2176 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2177 sizeof(key->tcp.flags))))
Jiri Pirko77b99002015-05-12 14:56:21 +02002178 goto nla_put_failure;
2179 else if (key->basic.ip_proto == IPPROTO_UDP &&
2180 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002181 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002182 sizeof(key->tp.src)) ||
2183 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002184 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002185 sizeof(key->tp.dst))))
2186 goto nla_put_failure;
Simon Horman5976c5f2016-11-03 13:24:21 +01002187 else if (key->basic.ip_proto == IPPROTO_SCTP &&
2188 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2189 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2190 sizeof(key->tp.src)) ||
2191 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2192 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2193 sizeof(key->tp.dst))))
2194 goto nla_put_failure;
Simon Horman7b684882016-12-07 13:48:28 +01002195 else if (key->basic.n_proto == htons(ETH_P_IP) &&
2196 key->basic.ip_proto == IPPROTO_ICMP &&
2197 (fl_dump_key_val(skb, &key->icmp.type,
2198 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2199 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2200 sizeof(key->icmp.type)) ||
2201 fl_dump_key_val(skb, &key->icmp.code,
2202 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2203 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2204 sizeof(key->icmp.code))))
2205 goto nla_put_failure;
2206 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2207 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2208 (fl_dump_key_val(skb, &key->icmp.type,
2209 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2210 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2211 sizeof(key->icmp.type)) ||
2212 fl_dump_key_val(skb, &key->icmp.code,
2213 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2214 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2215 sizeof(key->icmp.code))))
2216 goto nla_put_failure;
Simon Horman99d31322017-01-11 14:05:43 +01002217 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2218 key->basic.n_proto == htons(ETH_P_RARP)) &&
2219 (fl_dump_key_val(skb, &key->arp.sip,
2220 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2221 TCA_FLOWER_KEY_ARP_SIP_MASK,
2222 sizeof(key->arp.sip)) ||
2223 fl_dump_key_val(skb, &key->arp.tip,
2224 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2225 TCA_FLOWER_KEY_ARP_TIP_MASK,
2226 sizeof(key->arp.tip)) ||
2227 fl_dump_key_val(skb, &key->arp.op,
2228 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2229 TCA_FLOWER_KEY_ARP_OP_MASK,
2230 sizeof(key->arp.op)) ||
2231 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2232 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2233 sizeof(key->arp.sha)) ||
2234 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2235 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2236 sizeof(key->arp.tha))))
2237 goto nla_put_failure;
Jiri Pirko77b99002015-05-12 14:56:21 +02002238
Amritha Nambiar5c722992018-11-12 16:15:55 -08002239 if ((key->basic.ip_proto == IPPROTO_TCP ||
2240 key->basic.ip_proto == IPPROTO_UDP ||
2241 key->basic.ip_proto == IPPROTO_SCTP) &&
2242 fl_dump_key_port_range(skb, key, mask))
2243 goto nla_put_failure;
2244
Amir Vadaibc3103f2016-09-08 16:23:47 +03002245 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2246 (fl_dump_key_val(skb, &key->enc_ipv4.src,
2247 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2248 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2249 sizeof(key->enc_ipv4.src)) ||
2250 fl_dump_key_val(skb, &key->enc_ipv4.dst,
2251 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2252 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2253 sizeof(key->enc_ipv4.dst))))
2254 goto nla_put_failure;
2255 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2256 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2257 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2258 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2259 sizeof(key->enc_ipv6.src)) ||
2260 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2261 TCA_FLOWER_KEY_ENC_IPV6_DST,
2262 &mask->enc_ipv6.dst,
2263 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2264 sizeof(key->enc_ipv6.dst))))
2265 goto nla_put_failure;
2266
2267 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03002268 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02002269 sizeof(key->enc_key_id)) ||
2270 fl_dump_key_val(skb, &key->enc_tp.src,
2271 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2272 &mask->enc_tp.src,
2273 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2274 sizeof(key->enc_tp.src)) ||
2275 fl_dump_key_val(skb, &key->enc_tp.dst,
2276 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2277 &mask->enc_tp.dst,
2278 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002279 sizeof(key->enc_tp.dst)) ||
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002280 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2281 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
Amir Vadaibc3103f2016-09-08 16:23:47 +03002282 goto nla_put_failure;
2283
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002284 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2285 goto nla_put_failure;
2286
Jiri Pirkof5749082018-07-23 09:23:08 +02002287 return 0;
2288
2289nla_put_failure:
2290 return -EMSGSIZE;
2291}
2292
2293static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002294 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
Jiri Pirkof5749082018-07-23 09:23:08 +02002295{
2296 struct cls_fl_filter *f = fh;
2297 struct nlattr *nest;
2298 struct fl_flow_key *key, *mask;
Vlad Buslov3d81e712019-03-21 15:17:42 +02002299 bool skip_hw;
Jiri Pirkof5749082018-07-23 09:23:08 +02002300
2301 if (!f)
2302 return skb->len;
2303
2304 t->tcm_handle = f->handle;
2305
2306 nest = nla_nest_start(skb, TCA_OPTIONS);
2307 if (!nest)
2308 goto nla_put_failure;
2309
Vlad Buslov3d81e712019-03-21 15:17:42 +02002310 spin_lock(&tp->lock);
2311
Jiri Pirkof5749082018-07-23 09:23:08 +02002312 if (f->res.classid &&
2313 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
Vlad Buslov3d81e712019-03-21 15:17:42 +02002314 goto nla_put_failure_locked;
Jiri Pirkof5749082018-07-23 09:23:08 +02002315
2316 key = &f->key;
2317 mask = &f->mask->key;
Vlad Buslov3d81e712019-03-21 15:17:42 +02002318 skip_hw = tc_skip_hw(f->flags);
Jiri Pirkof5749082018-07-23 09:23:08 +02002319
2320 if (fl_dump_key(skb, net, key, mask))
Vlad Buslov3d81e712019-03-21 15:17:42 +02002321 goto nla_put_failure_locked;
Jiri Pirkof5749082018-07-23 09:23:08 +02002322
Or Gerlitz749e6722017-02-16 10:31:10 +02002323 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
Vlad Buslov3d81e712019-03-21 15:17:42 +02002324 goto nla_put_failure_locked;
2325
2326 spin_unlock(&tp->lock);
2327
2328 if (!skip_hw)
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002329 fl_hw_update_stats(tp, f, rtnl_held);
Amir Vadaie69985c2016-06-05 17:11:18 +03002330
Vlad Buslov86c55362018-09-07 17:22:21 +03002331 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2332 goto nla_put_failure;
2333
Jiri Pirko77b99002015-05-12 14:56:21 +02002334 if (tcf_exts_dump(skb, &f->exts))
2335 goto nla_put_failure;
2336
2337 nla_nest_end(skb, nest);
2338
2339 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2340 goto nla_put_failure;
2341
2342 return skb->len;
2343
Vlad Buslov3d81e712019-03-21 15:17:42 +02002344nla_put_failure_locked:
2345 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02002346nla_put_failure:
2347 nla_nest_cancel(skb, nest);
2348 return -1;
2349}
2350
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002351static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2352{
2353 struct fl_flow_tmplt *tmplt = tmplt_priv;
2354 struct fl_flow_key *key, *mask;
2355 struct nlattr *nest;
2356
2357 nest = nla_nest_start(skb, TCA_OPTIONS);
2358 if (!nest)
2359 goto nla_put_failure;
2360
2361 key = &tmplt->dummy_key;
2362 mask = &tmplt->mask;
2363
2364 if (fl_dump_key(skb, net, key, mask))
2365 goto nla_put_failure;
2366
2367 nla_nest_end(skb, nest);
2368
2369 return skb->len;
2370
2371nla_put_failure:
2372 nla_nest_cancel(skb, nest);
2373 return -EMSGSIZE;
2374}
2375
Cong Wang07d79fc2017-08-30 14:30:36 -07002376static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
2377{
2378 struct cls_fl_filter *f = fh;
2379
2380 if (f && f->res.classid == classid)
2381 f->res.class = cl;
2382}
2383
Jiri Pirko77b99002015-05-12 14:56:21 +02002384static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2385 .kind = "flower",
2386 .classify = fl_classify,
2387 .init = fl_init,
2388 .destroy = fl_destroy,
2389 .get = fl_get,
Vlad Buslov06177552019-03-21 15:17:35 +02002390 .put = fl_put,
Jiri Pirko77b99002015-05-12 14:56:21 +02002391 .change = fl_change,
2392 .delete = fl_delete,
2393 .walk = fl_walk,
John Hurley31533cb2018-06-25 14:30:06 -07002394 .reoffload = fl_reoffload,
Jiri Pirko77b99002015-05-12 14:56:21 +02002395 .dump = fl_dump,
Cong Wang07d79fc2017-08-30 14:30:36 -07002396 .bind_class = fl_bind_class,
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002397 .tmplt_create = fl_tmplt_create,
2398 .tmplt_destroy = fl_tmplt_destroy,
2399 .tmplt_dump = fl_tmplt_dump,
Jiri Pirko77b99002015-05-12 14:56:21 +02002400 .owner = THIS_MODULE,
Vlad Buslov92149192019-03-21 15:17:44 +02002401 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
Jiri Pirko77b99002015-05-12 14:56:21 +02002402};
2403
2404static int __init cls_fl_init(void)
2405{
2406 return register_tcf_proto_ops(&cls_fl_ops);
2407}
2408
2409static void __exit cls_fl_exit(void)
2410{
2411 unregister_tcf_proto_ops(&cls_fl_ops);
2412}
2413
2414module_init(cls_fl_init);
2415module_exit(cls_fl_exit);
2416
2417MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2418MODULE_DESCRIPTION("Flower classifier");
2419MODULE_LICENSE("GPL v2");