/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

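/* Flow key used both for dissecting skbs and for storing filter keys and
 * masks. It is laid out so that it can be masked, hashed and compared as an
 * array of longs (see fl_set_masked_key() below), which is why the struct is
 * tagged __aligned(BITS_PER_LONG / 8).
 */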
struct fl_flow_key {
	int indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports tp_min;
	struct flow_dissector_key_ports tp_max;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

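/* Compute the smallest long-aligned byte range [start, end) of the mask that
 * contains all of its non-zero bytes. Only this range needs to be copied,
 * masked and hashed on lookup; e.g. a mask that only sets eth.dst yields a
 * range covering little more than the ethernet address members.
 */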
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

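/* mkey = key & mask, computed long by long over the mask's relevant range
 * only. The result is the value that is looked up in the mask's hashtable.
 */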
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

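/* Port-range filters cannot be found by a plain hash lookup. The two helpers
 * below check whether the skb's port lies within a filter's [min, max]
 * interval and, on success, patch the lookup key with the filter's own
 * min/max values so that the following hash lookup can match.
 */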
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_max.dst);
	min_val = htons(filter->key.tp_min.dst);
	max_val = htons(filter->key.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp.dst) < min_val ||
		    htons(key->tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.dst = filter->mkey.tp_min.dst;
		mkey->tp_max.dst = filter->mkey.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.src);
	max_mask = htons(filter->mask->key.tp_max.src);
	min_val = htons(filter->key.tp_min.src);
	max_val = htons(filter->key.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp.src) < min_val ||
		    htons(key->tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.src = filter->mkey.tp_min.src;
		mkey->tp_max.src = filter->mkey.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

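/* Linear scan over the filters of a range-capable mask: for every candidate
 * filter the masked key is adjusted by the port-range helpers above before
 * the hash lookup is retried.
 */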
static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}

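/* Main classification path: for each mask, dissect the skb into a flow key,
 * mask it and look the result up in that mask's hashtable. The first match
 * that is not skip_sw has its actions executed.
 */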
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_mask *mask;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_key.indev_ifindex = skb->skb_iif;
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask)
{
	WARN_ON(!list_empty(&mask->filters));
	rhashtable_destroy(&mask->ht);
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask);
}

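/* Drop one reference to a mask. On the last put the mask is unlinked from the
 * head's hashtable and masks list and freed via a workqueue after an RCU
 * grace period. Returns true if the mask was released.
 */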
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	spin_lock(&tp->lock);
	list_del_init(&f->hw_list);
	tcf_block_offload_dec(block, &f->flags);
	spin_unlock(&tp->lock);

	if (!rtnl_held)
		rtnl_unlock();
}

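/* Offload a filter through the block's TC_SETUP_CLSFLOWER callbacks. A
 * positive return from tc_setup_cb_call() is the number of devices that
 * accepted the rule (stored in f->in_hw_count); skip_sw filters must end up
 * in hardware or the replace fails with -EINVAL. The rtnl lock is taken only
 * when the caller does not already hold it.
 */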
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	if (!rtnl_held)
		rtnl_lock();

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule) {
		err = -ENOMEM;
		goto errout;
	}

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = TC_CLSFLOWER_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;
		goto errout;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
	kfree(cls_flower.rule);

	if (err < 0) {
		fl_hw_destroy_filter(tp, f, true, NULL);
		goto errout;
	} else if (err > 0) {
		f->in_hw_count = err;
		err = 0;
		spin_lock(&tp->lock);
		tcf_block_offload_inc(block, &f->flags);
		spin_unlock(&tp->lock);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
		err = -EINVAL;
		goto errout;
	}

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
errout:
	if (!rtnl_held)
		rtnl_unlock();

	return err;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tc_cls_flower_offload cls_flower = {};
	struct tcf_block *block = tp->chain->block;

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = TC_CLSFLOWER_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);

	if (!rtnl_held)
		rtnl_unlock();
}

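/* Release one filter reference. The final put defers freeing to a workqueue
 * when the actions hold a netns reference, so that teardown (which may sleep)
 * happens in process context; otherwise the filter is freed directly.
 */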
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
						unsigned long *handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f;

	rcu_read_lock();
	while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
		/* don't return filters that are being deleted */
		if (refcount_inc_not_zero(&f->refcnt))
			break;
		++(*handle);
	}
	rcu_read_unlock();

	return f;
}

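/* Unlink a filter from the software tables under tp->lock, then tear down its
 * hardware rule and drop the caller's reference. *last is set when the
 * filter's mask was released as well, i.e. this was the mask's last filter.
 */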
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

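/* Netlink attribute policy for filter configuration. Match keys generally
 * come in value/mask attribute pairs; a missing mask attribute means an
 * exact match (see fl_set_key_val() below).
 */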
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

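/* Copy one key value and its mask from netlink. When no mask attribute is
 * defined (TCA_FLOWER_UNSPEC) or none was supplied, the mask is set to
 * all-ones, turning the value into an exact-match key.
 */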
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

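/* Parse the min/max port attributes used for range matching and reject
 * degenerate ranges where max <= min.
 */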
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
	fl_set_key_val(tb, &key->tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
	fl_set_key_val(tb, &key->tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
	fl_set_key_val(tb, &key->tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));

	if ((mask->tp_min.dst && mask->tp_max.dst &&
	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
	    (mask->tp_min.src && mask->tp_max.src &&
	     htons(key->tp_max.src) <= htons(key->tp_min.src)))
		return -EINVAL;

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

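/* Parse a single geneve option from a TCA_FLOWER_KEY_ENC_OPTS(_MASK) nest
 * into the raw enc_opts.data buffer, returning the number of bytes consumed
 * or a negative error. A depth of zero means no mask nest was supplied, in
 * which case the option is left as the all-ones pattern set up above, i.e.
 * an exact match.
 */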
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

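/* Central netlink-to-flow-key translation: fills in both key and mask from
 * the TCA_FLOWER_* attributes, dispatching on ethertype and ip_proto so that
 * only the attribute groups valid for the given protocol layering are parsed.
 */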
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}

1252#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
zhong jiangcb205a82018-09-19 19:32:11 +08001253#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
Jiri Pirko77b99002015-05-12 14:56:21 +02001254
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001255#define FL_KEY_IS_MASKED(mask, member) \
1256 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1257		   0, FL_KEY_MEMBER_SIZE(member))
Jiri Pirko77b99002015-05-12 14:56:21 +02001258
1259#define FL_KEY_SET(keys, cnt, id, member) \
1260 do { \
1261 keys[cnt].key_id = id; \
1262 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1263 cnt++; \
1264	} while (0)
1265
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001266#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001267 do { \
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001268 if (FL_KEY_IS_MASKED(mask, member)) \
Jiri Pirko77b99002015-05-12 14:56:21 +02001269 FL_KEY_SET(keys, cnt, id, member); \
1270	} while (0)
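/* Example (as used in fl_init_dissector() below): include the VLAN key
 * only when at least one bit of the VLAN mask is set:
 *
 *	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 *			     FLOW_DISSECTOR_KEY_VLAN, vlan);
 */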
1271
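/* Build the per-mask flow dissector: control and basic are always
 * dissected, every other key is included only if some bit of its mask
 * is set, so the fast path never extracts fields this mask cannot match.
 */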
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001272static void fl_init_dissector(struct flow_dissector *dissector,
1273 struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02001274{
1275 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1276 size_t cnt = 0;
1277
Tom Herbert42aecaa2015-06-04 09:16:39 -07001278 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
Jiri Pirko77b99002015-05-12 14:56:21 +02001279 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001280 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001281 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001282 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001283 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001284 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion339ba872016-08-17 13:36:12 +03001285 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
Amritha Nambiar5c722992018-11-12 16:15:55 -08001286 if (FL_KEY_IS_MASKED(mask, tp) ||
1287 FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1288 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001289 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001290 FLOW_DISSECTOR_KEY_IP, ip);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001291 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02001292 FLOW_DISSECTOR_KEY_TCP, tcp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001293 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman7b684882016-12-07 13:48:28 +01001294 FLOW_DISSECTOR_KEY_ICMP, icmp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001295 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Simon Horman99d31322017-01-11 14:05:43 +01001296 FLOW_DISSECTOR_KEY_ARP, arp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001297 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001298 FLOW_DISSECTOR_KEY_MPLS, mpls);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001299 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001300 FLOW_DISSECTOR_KEY_VLAN, vlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001301 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Jianbo Liud64efd02018-07-06 05:38:16 +00001302 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001303 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001304 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001305 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001306 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001307 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001308 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001309 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1310 FL_KEY_IS_MASKED(mask, enc_ipv6))
Hadar Hen Zion519d1052016-11-07 15:14:38 +02001311 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1312 enc_control);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001313 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02001314 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001315 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001316 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02001317 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1318 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
Jiri Pirko77b99002015-05-12 14:56:21 +02001319
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001320 skb_flow_dissector_init(dissector, keys, cnt);
Paul Blakey05cd2712018-04-30 14:28:30 +03001321}
1322
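/* Promote the temporary mask node inserted by fl_check_assign_mask()
 * into a refcounted heap copy: set up its filter hash table and
 * dissector, swap it into head->ht in place of the temporary node, and
 * expose it on head->masks only once concurrent readers of the
 * temporary node are done (synchronize_rcu()).
 */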
1323static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1324 struct fl_flow_mask *mask)
1325{
1326 struct fl_flow_mask *newmask;
1327 int err;
1328
1329 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1330 if (!newmask)
1331 return ERR_PTR(-ENOMEM);
1332
1333 fl_mask_copy(newmask, mask);
1334
Amritha Nambiar5c722992018-11-12 16:15:55 -08001335 if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1336 (newmask->key.tp_min.src && newmask->key.tp_max.src))
1337 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1338
Paul Blakey05cd2712018-04-30 14:28:30 +03001339 err = fl_init_mask_hashtable(newmask);
1340 if (err)
1341 goto errout_free;
1342
Jiri Pirko33fb5cb2018-07-23 09:23:09 +02001343 fl_init_dissector(&newmask->dissector, &newmask->key);
Paul Blakey05cd2712018-04-30 14:28:30 +03001344
1345 INIT_LIST_HEAD_RCU(&newmask->filters);
1346
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001347 refcount_set(&newmask->refcnt, 1);
Vlad Buslov195c2342019-03-21 15:17:38 +02001348 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1349 &newmask->ht_node, mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001350 if (err)
1351 goto errout_destroy;
1352
Vlad Buslov195c2342019-03-21 15:17:38 +02001353 /* Wait until any potential concurrent users of mask are finished */
1354 synchronize_rcu();
1355
Vlad Buslov259e60f2019-03-21 15:17:39 +02001356 spin_lock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001357 list_add_tail_rcu(&newmask->list, &head->masks);
Vlad Buslov259e60f2019-03-21 15:17:39 +02001358 spin_unlock(&head->masks_lock);
Paul Blakey05cd2712018-04-30 14:28:30 +03001359
1360 return newmask;
1361
1362errout_destroy:
1363 rhashtable_destroy(&newmask->ht);
1364errout_free:
1365 kfree(newmask);
1366
1367 return ERR_PTR(err);
Jiri Pirko77b99002015-05-12 14:56:21 +02001368}
1369
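/* Look up or create the shared mask for fnew and take a reference on
 * it. Returns -EINVAL when replacing a filter whose old mask differs,
 * and -EAGAIN when the matching mask is concurrently being deleted.
 */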
1370static int fl_check_assign_mask(struct cls_fl_head *head,
Paul Blakey05cd2712018-04-30 14:28:30 +03001371 struct cls_fl_filter *fnew,
1372 struct cls_fl_filter *fold,
Jiri Pirko77b99002015-05-12 14:56:21 +02001373 struct fl_flow_mask *mask)
1374{
Paul Blakey05cd2712018-04-30 14:28:30 +03001375 struct fl_flow_mask *newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001376 int ret = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001377
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001378 rcu_read_lock();
Vlad Buslov195c2342019-03-21 15:17:38 +02001379
1380	/* Insert mask as a temporary node to prevent concurrent creation of a
1381	 * mask with the same key. Any concurrent lookups with the same key will
1382	 * return -EAGAIN because the mask's refcnt is zero. It is safe to
1383	 * insert the caller's temporary 'mask' (heap-allocated in fl_change())
1384	 * into the masks hash table because we call synchronize_rcu() before
1385	 * returning from this function (either on error or after replacing it
1386	 * with the final mask in fl_create_new_mask()).
1387	 */
1388 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1389 &mask->ht_node,
1390 mask_ht_params);
Paul Blakey05cd2712018-04-30 14:28:30 +03001391 if (!fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001392 rcu_read_unlock();
1393
Vlad Buslov195c2342019-03-21 15:17:38 +02001394 if (fold) {
1395 ret = -EINVAL;
1396 goto errout_cleanup;
1397 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001398
1399 newmask = fl_create_new_mask(head, mask);
Vlad Buslov195c2342019-03-21 15:17:38 +02001400 if (IS_ERR(newmask)) {
1401 ret = PTR_ERR(newmask);
1402 goto errout_cleanup;
1403 }
Paul Blakey05cd2712018-04-30 14:28:30 +03001404
1405 fnew->mask = newmask;
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001406 return 0;
Vlad Buslov195c2342019-03-21 15:17:38 +02001407 } else if (IS_ERR(fnew->mask)) {
1408 ret = PTR_ERR(fnew->mask);
Paul Blakeyf6521c52018-06-03 10:06:14 +03001409 } else if (fold && fold->mask != fnew->mask) {
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001410 ret = -EINVAL;
1411 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1412 /* Mask was deleted concurrently, try again */
1413 ret = -EAGAIN;
Jiri Pirko77b99002015-05-12 14:56:21 +02001414 }
Vlad Buslovf48ef4d2019-03-21 15:17:37 +02001415 rcu_read_unlock();
1416 return ret;
Vlad Buslov195c2342019-03-21 15:17:38 +02001417
1418errout_cleanup:
1419 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1420 mask_ht_params);
1421 /* Wait until any potential concurrent users of mask are finished */
1422 synchronize_rcu();
1423 return ret;
Jiri Pirko77b99002015-05-12 14:56:21 +02001424}
1425
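/* Validate the filter's actions and key attributes into f and mask.
 * tcf_bind_filter() is called under rtnl, which is taken here when the
 * caller does not already hold it.
 */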
1426static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1427 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1428 unsigned long base, struct nlattr **tb,
Alexander Aring50a56192018-01-18 11:20:52 -05001429 struct nlattr *est, bool ovr,
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001430 struct fl_flow_tmplt *tmplt, bool rtnl_held,
Alexander Aring50a56192018-01-18 11:20:52 -05001431 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001432{
Jiri Pirko77b99002015-05-12 14:56:21 +02001433 int err;
1434
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001435 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
Vlad Buslovec6743a2019-02-11 10:55:43 +02001436 extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001437 if (err < 0)
1438 return err;
1439
1440 if (tb[TCA_FLOWER_CLASSID]) {
1441 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001442 if (!rtnl_held)
1443 rtnl_lock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001444 tcf_bind_filter(tp, &f->res, base);
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001445 if (!rtnl_held)
1446 rtnl_unlock();
Jiri Pirko77b99002015-05-12 14:56:21 +02001447 }
1448
Alexander Aring1057c552018-01-18 11:20:54 -05001449 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
Jiri Pirko77b99002015-05-12 14:56:21 +02001450 if (err)
Jiri Pirko45507522017-08-04 14:29:06 +02001451 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001452
1453 fl_mask_update_range(mask);
1454 fl_set_masked_key(&f->mkey, &f->key, mask);
1455
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001456 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1457 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1458 return -EINVAL;
1459 }
1460
Jiri Pirko77b99002015-05-12 14:56:21 +02001461 return 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001462}
1463
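/* Try a unique insert of fnew into its mask's filter hash table. When
 * overwriting (fold != NULL), colliding with the old filter's key is
 * fine: return 0 with *in_ht == false and let fl_change() insert fnew
 * non-uniquely under tp->lock, just before it removes fold.
 */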
Vlad Buslov1f17f772019-04-05 20:56:26 +03001464static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1465 struct cls_fl_filter *fold,
1466 bool *in_ht)
1467{
1468 struct fl_flow_mask *mask = fnew->mask;
1469 int err;
1470
Vlad Buslov9e355522019-04-11 19:12:20 +03001471 err = rhashtable_lookup_insert_fast(&mask->ht,
1472 &fnew->ht_node,
1473 mask->filter_ht_params);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001474 if (err) {
1475 *in_ht = false;
1476		/* It is okay if a filter with the same key exists when
1477		 * overwriting.
1478		 */
1479 return fold && err == -EEXIST ? 0 : err;
1480 }
1481
1482 *in_ht = true;
1483 return 0;
1484}
1485
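/* Create a new filter or replace the one at *arg. May run without rtnl
 * (the ops set TCF_PROTO_OPS_DOIT_UNLOCKED); races with concurrent
 * delete/replace are resolved via refcounts, tp->lock and -EAGAIN
 * retries in cls_api.
 */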
Jiri Pirko77b99002015-05-12 14:56:21 +02001486static int fl_change(struct net *net, struct sk_buff *in_skb,
1487 struct tcf_proto *tp, unsigned long base,
1488 u32 handle, struct nlattr **tca,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001489 void **arg, bool ovr, bool rtnl_held,
1490 struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001491{
Vlad Buslove4746192019-03-21 15:17:33 +02001492 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07001493 struct cls_fl_filter *fold = *arg;
Jiri Pirko77b99002015-05-12 14:56:21 +02001494 struct cls_fl_filter *fnew;
Ivan Vecera2cddd202019-01-16 16:53:52 +01001495 struct fl_flow_mask *mask;
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001496 struct nlattr **tb;
Vlad Buslov1f17f772019-04-05 20:56:26 +03001497 bool in_ht;
Jiri Pirko77b99002015-05-12 14:56:21 +02001498 int err;
1499
Vlad Buslov06177552019-03-21 15:17:35 +02001500 if (!tca[TCA_OPTIONS]) {
1501 err = -EINVAL;
1502 goto errout_fold;
1503 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001504
Ivan Vecera2cddd202019-01-16 16:53:52 +01001505 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
Vlad Buslov06177552019-03-21 15:17:35 +02001506 if (!mask) {
1507 err = -ENOBUFS;
1508 goto errout_fold;
1509 }
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001510
Ivan Vecera2cddd202019-01-16 16:53:52 +01001511 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1512 if (!tb) {
1513 err = -ENOBUFS;
1514 goto errout_mask_alloc;
1515 }
1516
Johannes Berg8cb08172019-04-26 14:07:28 +02001517 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1518 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001519 if (err < 0)
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001520 goto errout_tb;
Jiri Pirko77b99002015-05-12 14:56:21 +02001521
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001522 if (fold && handle && fold->handle != handle) {
1523 err = -EINVAL;
1524 goto errout_tb;
1525 }
Jiri Pirko77b99002015-05-12 14:56:21 +02001526
1527 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001528 if (!fnew) {
1529 err = -ENOBUFS;
1530 goto errout_tb;
1531 }
Vlad Buslovc049d562019-04-24 09:53:31 +03001532 INIT_LIST_HEAD(&fnew->hw_list);
Vlad Buslov06177552019-03-21 15:17:35 +02001533 refcount_set(&fnew->refcnt, 1);
Jiri Pirko77b99002015-05-12 14:56:21 +02001534
Cong Wang14215102019-02-20 21:37:42 -08001535 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
WANG Congb9a24bb2016-08-19 12:36:54 -07001536 if (err < 0)
1537 goto errout;
Jiri Pirko77b99002015-05-12 14:56:21 +02001538
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001539 if (tb[TCA_FLOWER_FLAGS]) {
1540 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1541
1542 if (!tc_flags_valid(fnew->flags)) {
1543 err = -EINVAL;
1544 goto errout;
1545 }
1546 }
1547
1548 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001549 tp->chain->tmplt_priv, rtnl_held, extack);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001550 if (err)
1551 goto errout;
1552
1553 err = fl_check_assign_mask(head, fnew, fold, mask);
1554 if (err)
1555 goto errout;
1556
Vlad Buslov1f17f772019-04-05 20:56:26 +03001557 err = fl_ht_insert_unique(fnew, fold, &in_ht);
1558 if (err)
1559 goto errout_mask;
1560
Hadar Hen Zion79685212016-12-01 14:06:34 +02001561 if (!tc_skip_hw(fnew->flags)) {
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001562 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
Hadar Hen Zion79685212016-12-01 14:06:34 +02001563 if (err)
Vlad Buslov1f17f772019-04-05 20:56:26 +03001564 goto errout_ht;
Hadar Hen Zion79685212016-12-01 14:06:34 +02001565 }
Amir Vadai5b33f482016-03-08 12:42:29 +02001566
Or Gerlitz55593962017-02-16 10:31:13 +02001567 if (!tc_in_hw(fnew->flags))
1568 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1569
Vlad Buslov3d81e712019-03-21 15:17:42 +02001570 spin_lock(&tp->lock);
1571
Vlad Buslov272ffaa2019-03-21 15:17:41 +02001572	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
1573	 * up the proto again or create a new one, if necessary.
1574	 */
1575 if (tp->deleting) {
1576 err = -EAGAIN;
1577 goto errout_hw;
1578 }
1579
Amir Vadai5b33f482016-03-08 12:42:29 +02001580 if (fold) {
Vlad Buslovb2552b82019-03-21 15:17:36 +02001581 /* Fold filter was deleted concurrently. Retry lookup. */
1582 if (fold->deleted) {
1583 err = -EAGAIN;
1584 goto errout_hw;
1585 }
1586
Vlad Buslov620da482019-03-21 15:17:34 +02001587 fnew->handle = handle;
1588
Vlad Buslov1f17f772019-04-05 20:56:26 +03001589 if (!in_ht) {
1590 struct rhashtable_params params =
1591 fnew->mask->filter_ht_params;
1592
1593 err = rhashtable_insert_fast(&fnew->mask->ht,
1594 &fnew->ht_node,
1595 params);
1596 if (err)
1597 goto errout_hw;
1598 in_ht = true;
1599 }
Vlad Buslov620da482019-03-21 15:17:34 +02001600
Vlad Buslovc049d562019-04-24 09:53:31 +03001601 refcount_inc(&fnew->refcnt);
Roi Dayan599d2572018-12-19 18:07:56 +02001602 rhashtable_remove_fast(&fold->mask->ht,
1603 &fold->ht_node,
1604 fold->mask->filter_ht_params);
Matthew Wilcox234a4622017-11-28 09:56:36 -05001605 idr_replace(&head->handle_idr, fnew, fnew->handle);
Daniel Borkmannff3532f2015-07-17 22:38:44 +02001606 list_replace_rcu(&fold->list, &fnew->list);
Vlad Buslovb2552b82019-03-21 15:17:36 +02001607 fold->deleted = true;
Vlad Buslov620da482019-03-21 15:17:34 +02001608
Vlad Buslov3d81e712019-03-21 15:17:42 +02001609 spin_unlock(&tp->lock);
1610
Vlad Buslov99946772019-04-12 00:54:19 +03001611 fl_mask_put(head, fold->mask);
Vlad Buslov620da482019-03-21 15:17:34 +02001612 if (!tc_skip_hw(fold->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001613 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
Jiri Pirko77b99002015-05-12 14:56:21 +02001614 tcf_unbind_filter(tp, &fold->res);
Vlad Buslov06177552019-03-21 15:17:35 +02001615 /* Caller holds reference to fold, so refcnt is always > 0
1616 * after this.
1617 */
1618 refcount_dec(&fold->refcnt);
1619 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02001620 } else {
Vlad Buslov620da482019-03-21 15:17:34 +02001621 if (handle) {
1622 /* user specifies a handle and it doesn't exist */
1623 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1624 handle, GFP_ATOMIC);
Vlad Buslov9a2d9382019-03-21 15:17:40 +02001625
1626			/* A filter with the specified handle was concurrently
1627			 * inserted after the initial check in cls_api. This is
1628			 * not necessarily an error if NLM_F_EXCL is not set in
1629			 * the message flags. Returning -EAGAIN will cause
1630			 * cls_api to try to update the concurrently inserted rule.
1631			 */
1632 if (err == -ENOSPC)
1633 err = -EAGAIN;
Vlad Buslov620da482019-03-21 15:17:34 +02001634 } else {
1635 handle = 1;
1636 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1637 INT_MAX, GFP_ATOMIC);
1638 }
1639 if (err)
1640 goto errout_hw;
1641
Vlad Buslovc049d562019-04-24 09:53:31 +03001642 refcount_inc(&fnew->refcnt);
Vlad Buslov620da482019-03-21 15:17:34 +02001643 fnew->handle = handle;
Paul Blakey05cd2712018-04-30 14:28:30 +03001644 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
Vlad Buslov3d81e712019-03-21 15:17:42 +02001645 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02001646 }
1647
Vlad Buslov620da482019-03-21 15:17:34 +02001648 *arg = fnew;
1649
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001650 kfree(tb);
Ivan Vecera2cddd202019-01-16 16:53:52 +01001651 kfree(mask);
Jiri Pirko77b99002015-05-12 14:56:21 +02001652 return 0;
1653
Vlad Buslovc049d562019-04-24 09:53:31 +03001654errout_ht:
1655 spin_lock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02001656errout_hw:
Vlad Buslovc049d562019-04-24 09:53:31 +03001657 fnew->deleted = true;
Vlad Buslov3d81e712019-03-21 15:17:42 +02001658 spin_unlock(&tp->lock);
Vlad Buslov620da482019-03-21 15:17:34 +02001659 if (!tc_skip_hw(fnew->flags))
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001660 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
Vlad Buslov1f17f772019-04-05 20:56:26 +03001661 if (in_ht)
1662 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1663 fnew->mask->filter_ht_params);
Vlad Buslovecb3dea2019-03-06 16:22:12 +02001664errout_mask:
Vlad Buslov99946772019-04-12 00:54:19 +03001665 fl_mask_put(head, fnew->mask);
Jiri Pirko77b99002015-05-12 14:56:21 +02001666errout:
Vlad Buslovc049d562019-04-24 09:53:31 +03001667 __fl_put(fnew);
Arnd Bergmann39b7b6a2017-01-19 10:45:31 +01001668errout_tb:
1669 kfree(tb);
Ivan Vecera2cddd202019-01-16 16:53:52 +01001670errout_mask_alloc:
1671 kfree(mask);
Vlad Buslov06177552019-03-21 15:17:35 +02001672errout_fold:
1673 if (fold)
1674 __fl_put(fold);
Jiri Pirko77b99002015-05-12 14:56:21 +02001675 return err;
1676}
1677
Alexander Aring571acf22018-01-18 11:20:53 -05001678static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
Vlad Buslov12db03b2019-02-11 10:55:45 +02001679 bool rtnl_held, struct netlink_ext_ack *extack)
Jiri Pirko77b99002015-05-12 14:56:21 +02001680{
Vlad Buslove4746192019-03-21 15:17:33 +02001681 struct cls_fl_head *head = fl_head_dereference(tp);
WANG Cong8113c092017-08-04 21:31:43 -07001682 struct cls_fl_filter *f = arg;
Vlad Buslovb2552b82019-03-21 15:17:36 +02001683 bool last_on_mask;
1684 int err = 0;
Jiri Pirko77b99002015-05-12 14:56:21 +02001685
Vlad Buslovc24e43d82019-03-21 15:17:43 +02001686 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
Paul Blakey05cd2712018-04-30 14:28:30 +03001687 *last = list_empty(&head->masks);
Vlad Buslov06177552019-03-21 15:17:35 +02001688 __fl_put(f);
1689
Vlad Buslovb2552b82019-03-21 15:17:36 +02001690 return err;
Jiri Pirko77b99002015-05-12 14:56:21 +02001691}
1692
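/* Walk all filters for a dump, keyed by arg->cookie. fl_get_next_filter()
 * returns each filter with a reference held; it is dropped again after
 * the callback runs.
 */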
Vlad Buslov12db03b2019-02-11 10:55:45 +02001693static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1694 bool rtnl_held)
Jiri Pirko77b99002015-05-12 14:56:21 +02001695{
Jiri Pirko77b99002015-05-12 14:56:21 +02001696 struct cls_fl_filter *f;
1697
Vlad Buslov01683a12018-07-09 13:29:11 +03001698 arg->count = arg->skip;
1699
Vlad Buslov06177552019-03-21 15:17:35 +02001700 while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
Vlad Buslov01683a12018-07-09 13:29:11 +03001701 if (arg->fn(tp, f, arg) < 0) {
Vlad Buslov06177552019-03-21 15:17:35 +02001702 __fl_put(f);
Vlad Buslov01683a12018-07-09 13:29:11 +03001703 arg->stop = 1;
1704 break;
Paul Blakey05cd2712018-04-30 14:28:30 +03001705 }
Vlad Buslov06177552019-03-21 15:17:35 +02001706 __fl_put(f);
1707 arg->cookie++;
Vlad Buslov01683a12018-07-09 13:29:11 +03001708 arg->count++;
Jiri Pirko77b99002015-05-12 14:56:21 +02001709 }
1710}
1711
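/* Return the next hw-offloaded filter after @f (or the first one when
 * @f is NULL) that a reference can still be taken on; when adding,
 * filters already marked deleted are skipped. Runs under tp->lock.
 */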
Vlad Buslovc049d562019-04-24 09:53:31 +03001712static struct cls_fl_filter *
1713fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1714{
1715 struct cls_fl_head *head = fl_head_dereference(tp);
1716
1717 spin_lock(&tp->lock);
1718 if (list_empty(&head->hw_filters)) {
1719 spin_unlock(&tp->lock);
1720 return NULL;
1721 }
1722
1723 if (!f)
1724 f = list_entry(&head->hw_filters, struct cls_fl_filter,
1725 hw_list);
1726 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1727 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1728 spin_unlock(&tp->lock);
1729 return f;
1730 }
1731 }
1732
1733 spin_unlock(&tp->lock);
1734 return NULL;
1735}
1736
John Hurley31533cb2018-06-25 14:30:06 -07001737static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
1738 void *cb_priv, struct netlink_ext_ack *extack)
1739{
John Hurley31533cb2018-06-25 14:30:06 -07001740 struct tc_cls_flower_offload cls_flower = {};
1741 struct tcf_block *block = tp->chain->block;
Vlad Buslovc049d562019-04-24 09:53:31 +03001742 struct cls_fl_filter *f = NULL;
John Hurley31533cb2018-06-25 14:30:06 -07001743 int err;
1744
Vlad Buslovc049d562019-04-24 09:53:31 +03001745	/* The hw_filters list can only be changed by hw offload functions
1746	 * after obtaining the rtnl lock. Make sure it is not changed while
1747	 * reoffload is iterating it.
1748 */
1749 ASSERT_RTNL();
John Hurley31533cb2018-06-25 14:30:06 -07001750
Vlad Buslovc049d562019-04-24 09:53:31 +03001751 while ((f = fl_get_next_hw_filter(tp, f, add))) {
John Hurley95e27a42019-04-02 23:53:20 +01001752 cls_flower.rule =
1753 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1754 if (!cls_flower.rule) {
1755 __fl_put(f);
1756 return -ENOMEM;
John Hurley31533cb2018-06-25 14:30:06 -07001757 }
John Hurley95e27a42019-04-02 23:53:20 +01001758
1759 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
Pieter Jansen van Vuurend6787142019-05-06 17:24:21 -07001760 extack);
John Hurley95e27a42019-04-02 23:53:20 +01001761 cls_flower.command = add ?
1762 TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
1763 cls_flower.cookie = (unsigned long)f;
1764 cls_flower.rule->match.dissector = &f->mask->dissector;
1765 cls_flower.rule->match.mask = &f->mask->key;
1766 cls_flower.rule->match.key = &f->mkey;
1767
1768 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
1769 if (err) {
1770 kfree(cls_flower.rule);
1771 if (tc_skip_sw(f->flags)) {
1772 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1773 __fl_put(f);
1774 return err;
1775 }
1776 goto next_flow;
1777 }
1778
1779 cls_flower.classid = f->res.classid;
1780
1781 err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
1782 kfree(cls_flower.rule);
1783
1784 if (err) {
1785 if (add && tc_skip_sw(f->flags)) {
1786 __fl_put(f);
1787 return err;
1788 }
1789 goto next_flow;
1790 }
1791
1792 spin_lock(&tp->lock);
1793 tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
1794 add);
1795 spin_unlock(&tp->lock);
1796next_flow:
John Hurley95e27a42019-04-02 23:53:20 +01001797 __fl_put(f);
John Hurley31533cb2018-06-25 14:30:06 -07001798 }
1799
1800 return 0;
1801}
1802
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001803static int fl_hw_create_tmplt(struct tcf_chain *chain,
1804 struct fl_flow_tmplt *tmplt)
Jiri Pirko34738452018-07-23 09:23:11 +02001805{
1806 struct tc_cls_flower_offload cls_flower = {};
1807 struct tcf_block *block = chain->block;
Jiri Pirko34738452018-07-23 09:23:11 +02001808
Pablo Neira Ayusoe3ab7862019-02-02 12:50:45 +01001809 cls_flower.rule = flow_rule_alloc(0);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001810 if (!cls_flower.rule)
1811 return -ENOMEM;
1812
Jiri Pirko34738452018-07-23 09:23:11 +02001813 cls_flower.common.chain_index = chain->index;
1814 cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
1815 cls_flower.cookie = (unsigned long) tmplt;
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001816 cls_flower.rule->match.dissector = &tmplt->dissector;
1817 cls_flower.rule->match.mask = &tmplt->mask;
1818 cls_flower.rule->match.key = &tmplt->dummy_key;
Jiri Pirko34738452018-07-23 09:23:11 +02001819
1820	/* We don't care if any of the drivers fail to handle this
1821	 * call. It serves just as a hint for them.
1822	 */
Cong Wangaeb3fec2018-12-11 11:15:46 -08001823 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001824 kfree(cls_flower.rule);
1825
1826 return 0;
Jiri Pirko34738452018-07-23 09:23:11 +02001827}
1828
1829static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1830 struct fl_flow_tmplt *tmplt)
1831{
1832 struct tc_cls_flower_offload cls_flower = {};
1833 struct tcf_block *block = chain->block;
1834
1835 cls_flower.common.chain_index = chain->index;
1836 cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
1837 cls_flower.cookie = (unsigned long) tmplt;
1838
Cong Wangaeb3fec2018-12-11 11:15:46 -08001839 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
Jiri Pirko34738452018-07-23 09:23:11 +02001840}
1841
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001842static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1843 struct nlattr **tca,
1844 struct netlink_ext_ack *extack)
1845{
1846 struct fl_flow_tmplt *tmplt;
1847 struct nlattr **tb;
1848 int err;
1849
1850 if (!tca[TCA_OPTIONS])
1851 return ERR_PTR(-EINVAL);
1852
1853 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1854 if (!tb)
1855 return ERR_PTR(-ENOBUFS);
Johannes Berg8cb08172019-04-26 14:07:28 +02001856 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1857 tca[TCA_OPTIONS], fl_policy, NULL);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001858 if (err)
1859 goto errout_tb;
1860
1861 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03001862 if (!tmplt) {
1863 err = -ENOMEM;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001864 goto errout_tb;
Dan Carpenter1cbc36a52018-08-03 22:27:55 +03001865 }
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001866 tmplt->chain = chain;
1867 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1868 if (err)
1869 goto errout_tmplt;
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001870
1871 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1872
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001873 err = fl_hw_create_tmplt(chain, tmplt);
1874 if (err)
1875 goto errout_tmplt;
Jiri Pirko34738452018-07-23 09:23:11 +02001876
Pablo Neira Ayuso8f256622019-02-02 12:50:43 +01001877 kfree(tb);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001878 return tmplt;
1879
1880errout_tmplt:
1881 kfree(tmplt);
1882errout_tb:
1883 kfree(tb);
1884 return ERR_PTR(err);
1885}
1886
1887static void fl_tmplt_destroy(void *tmplt_priv)
1888{
1889 struct fl_flow_tmplt *tmplt = tmplt_priv;
1890
Cong Wang95278dd2018-10-02 12:50:19 -07001891 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1892 kfree(tmplt);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02001893}
1894
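/* Dump a single key/mask pair: nothing is emitted when the mask is all
 * zero, and the mask attribute is omitted for fields whose uapi defines
 * no mask (mask_type == TCA_FLOWER_UNSPEC).
 */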
Jiri Pirko77b99002015-05-12 14:56:21 +02001895static int fl_dump_key_val(struct sk_buff *skb,
1896 void *val, int val_type,
1897 void *mask, int mask_type, int len)
1898{
1899 int err;
1900
1901 if (!memchr_inv(mask, 0, len))
1902 return 0;
1903 err = nla_put(skb, val_type, len, val);
1904 if (err)
1905 return err;
1906 if (mask_type != TCA_FLOWER_UNSPEC) {
1907 err = nla_put(skb, mask_type, len, mask);
1908 if (err)
1909 return err;
1910 }
1911 return 0;
1912}
1913
Amritha Nambiar5c722992018-11-12 16:15:55 -08001914static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1915 struct fl_flow_key *mask)
1916{
1917 if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1918 &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1919 sizeof(key->tp_min.dst)) ||
1920 fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1921 &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1922 sizeof(key->tp_max.dst)) ||
1923 fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1924 &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1925 sizeof(key->tp_min.src)) ||
1926 fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1927 &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1928 sizeof(key->tp_max.src)))
1929 return -1;
1930
1931 return 0;
1932}
1933
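/* MPLS fields have no uapi mask attributes; each field is dumped only
 * when its mask is non-zero, and an all-zero mask dumps nothing.
 */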
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04001934static int fl_dump_key_mpls(struct sk_buff *skb,
1935 struct flow_dissector_key_mpls *mpls_key,
1936 struct flow_dissector_key_mpls *mpls_mask)
1937{
1938 int err;
1939
1940 if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1941 return 0;
1942 if (mpls_mask->mpls_ttl) {
1943 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1944 mpls_key->mpls_ttl);
1945 if (err)
1946 return err;
1947 }
1948 if (mpls_mask->mpls_tc) {
1949 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1950 mpls_key->mpls_tc);
1951 if (err)
1952 return err;
1953 }
1954 if (mpls_mask->mpls_label) {
1955 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1956 mpls_key->mpls_label);
1957 if (err)
1958 return err;
1959 }
1960 if (mpls_mask->mpls_bos) {
1961 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1962 mpls_key->mpls_bos);
1963 if (err)
1964 return err;
1965 }
1966 return 0;
1967}
1968
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001969static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001970 struct flow_dissector_key_ip *key,
1971 struct flow_dissector_key_ip *mask)
1972{
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03001973 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1974 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1975 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1976 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1977
1978 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
1979 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
Or Gerlitz4d80cc02017-06-01 21:37:38 +03001980 return -1;
1981
1982 return 0;
1983}
1984
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001985static int fl_dump_key_vlan(struct sk_buff *skb,
Jianbo Liud64efd02018-07-06 05:38:16 +00001986 int vlan_id_key, int vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001987 struct flow_dissector_key_vlan *vlan_key,
1988 struct flow_dissector_key_vlan *vlan_mask)
1989{
1990 int err;
1991
1992 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
1993 return 0;
1994 if (vlan_mask->vlan_id) {
Jianbo Liud64efd02018-07-06 05:38:16 +00001995 err = nla_put_u16(skb, vlan_id_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03001996 vlan_key->vlan_id);
1997 if (err)
1998 return err;
1999 }
2000 if (vlan_mask->vlan_priority) {
Jianbo Liud64efd02018-07-06 05:38:16 +00002001 err = nla_put_u8(skb, vlan_prio_key,
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002002 vlan_key->vlan_priority);
2003 if (err)
2004 return err;
2005 }
2006 return 0;
2007}
2008
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002009static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2010 u32 *flower_key, u32 *flower_mask,
2011 u32 flower_flag_bit, u32 dissector_flag_bit)
2012{
2013 if (dissector_mask & dissector_flag_bit) {
2014 *flower_mask |= flower_flag_bit;
2015 if (dissector_key & dissector_flag_bit)
2016 *flower_key |= flower_flag_bit;
2017 }
2018}
2019
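/* Fold the dissector's FLOW_DIS_* control flags into the
 * TCA_FLOWER_KEY_FLAGS uapi bits and emit key and mask as big-endian
 * 32-bit attributes.
 */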
2020static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2021{
2022 u32 key, mask;
2023 __be32 _key, _mask;
2024 int err;
2025
2026 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2027 return 0;
2028
2029 key = 0;
2030 mask = 0;
2031
2032 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2033 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
Pieter Jansen van Vuuren459d1532018-03-06 18:11:14 +01002034 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2035 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2036 FLOW_DIS_FIRST_FRAG);
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002037
2038 _key = cpu_to_be32(key);
2039 _mask = cpu_to_be32(mask);
2040
2041 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2042 if (err)
2043 return err;
2044
2045 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2046}
2047
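/* enc_opts->data holds packed struct geneve_opt entries; opt->length is
 * in 4-byte words, hence the opt->length * 4 when dumping the option
 * data and advancing opt_off.
 */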
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002048static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2049 struct flow_dissector_key_enc_opts *enc_opts)
2050{
2051 struct geneve_opt *opt;
2052 struct nlattr *nest;
2053 int opt_off = 0;
2054
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002055 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002056 if (!nest)
2057 goto nla_put_failure;
2058
2059 while (enc_opts->len > opt_off) {
2060 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2061
2062 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2063 opt->opt_class))
2064 goto nla_put_failure;
2065 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2066 opt->type))
2067 goto nla_put_failure;
2068 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2069 opt->length * 4, opt->opt_data))
2070 goto nla_put_failure;
2071
2072 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2073 }
2074 nla_nest_end(skb, nest);
2075 return 0;
2076
2077nla_put_failure:
2078 nla_nest_cancel(skb, nest);
2079 return -EMSGSIZE;
2080}
2081
2082static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2083 struct flow_dissector_key_enc_opts *enc_opts)
2084{
2085 struct nlattr *nest;
2086 int err;
2087
2088 if (!enc_opts->len)
2089 return 0;
2090
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002091 nest = nla_nest_start_noflag(skb, enc_opt_type);
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002092 if (!nest)
2093 goto nla_put_failure;
2094
2095 switch (enc_opts->dst_opt_type) {
2096 case TUNNEL_GENEVE_OPT:
2097 err = fl_dump_key_geneve_opt(skb, enc_opts);
2098 if (err)
2099 goto nla_put_failure;
2100 break;
2101 default:
2102 goto nla_put_failure;
2103 }
2104 nla_nest_end(skb, nest);
2105 return 0;
2106
2107nla_put_failure:
2108 nla_nest_cancel(skb, nest);
2109 return -EMSGSIZE;
2110}
2111
2112static int fl_dump_key_enc_opt(struct sk_buff *skb,
2113 struct flow_dissector_key_enc_opts *key_opts,
2114 struct flow_dissector_key_enc_opts *msk_opts)
2115{
2116 int err;
2117
2118 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2119 if (err)
2120 return err;
2121
2122 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2123}
2124
Jiri Pirkof5749082018-07-23 09:23:08 +02002125static int fl_dump_key(struct sk_buff *skb, struct net *net,
2126 struct fl_flow_key *key, struct fl_flow_key *mask)
Jiri Pirko77b99002015-05-12 14:56:21 +02002127{
Jiri Pirko77b99002015-05-12 14:56:21 +02002128 if (mask->indev_ifindex) {
2129 struct net_device *dev;
2130
2131 dev = __dev_get_by_index(net, key->indev_ifindex);
2132 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2133 goto nla_put_failure;
2134 }
2135
2136 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2137 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2138 sizeof(key->eth.dst)) ||
2139 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2140 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2141 sizeof(key->eth.src)) ||
2142 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2143 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2144 sizeof(key->basic.n_proto)))
2145 goto nla_put_failure;
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002146
Benjamin LaHaisea577d8f2017-04-22 16:52:47 -04002147 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2148 goto nla_put_failure;
2149
Jianbo Liud64efd02018-07-06 05:38:16 +00002150 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2151 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
Hadar Hen Zion9399ae92016-08-17 13:36:13 +03002152 goto nla_put_failure;
2153
Jianbo Liud64efd02018-07-06 05:38:16 +00002154 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2155 TCA_FLOWER_KEY_CVLAN_PRIO,
2156 &key->cvlan, &mask->cvlan) ||
2157 (mask->cvlan.vlan_tpid &&
Jianbo Liu158abbf2018-07-25 02:31:25 +00002158 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2159 key->cvlan.vlan_tpid)))
Jianbo Liud3069512018-07-06 05:38:15 +00002160 goto nla_put_failure;
2161
Jianbo Liu5e9a0fe2018-07-09 02:26:20 +00002162 if (mask->basic.n_proto) {
2163 if (mask->cvlan.vlan_tpid) {
2164 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2165 key->basic.n_proto))
2166 goto nla_put_failure;
2167 } else if (mask->vlan.vlan_tpid) {
2168 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2169 key->basic.n_proto))
2170 goto nla_put_failure;
2171 }
Jianbo Liud64efd02018-07-06 05:38:16 +00002172 }
2173
Jiri Pirko77b99002015-05-12 14:56:21 +02002174 if ((key->basic.n_proto == htons(ETH_P_IP) ||
2175 key->basic.n_proto == htons(ETH_P_IPV6)) &&
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002176 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
Jiri Pirko77b99002015-05-12 14:56:21 +02002177 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
Or Gerlitz4d80cc02017-06-01 21:37:38 +03002178 sizeof(key->basic.ip_proto)) ||
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002179 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
Jiri Pirko77b99002015-05-12 14:56:21 +02002180 goto nla_put_failure;
2181
Tom Herbertc3f83242015-06-04 09:16:40 -07002182 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02002183 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2184 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2185 sizeof(key->ipv4.src)) ||
2186 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2187 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2188 sizeof(key->ipv4.dst))))
2189 goto nla_put_failure;
Tom Herbertc3f83242015-06-04 09:16:40 -07002190 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
Jiri Pirko77b99002015-05-12 14:56:21 +02002191 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2192 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2193 sizeof(key->ipv6.src)) ||
2194 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2195 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2196 sizeof(key->ipv6.dst))))
2197 goto nla_put_failure;
2198
2199 if (key->basic.ip_proto == IPPROTO_TCP &&
2200 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002201 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002202 sizeof(key->tp.src)) ||
2203 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002204 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
Jiri Pirkofdfc7dd2017-05-23 18:40:45 +02002205 sizeof(key->tp.dst)) ||
2206 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2207 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2208 sizeof(key->tcp.flags))))
Jiri Pirko77b99002015-05-12 14:56:21 +02002209 goto nla_put_failure;
2210 else if (key->basic.ip_proto == IPPROTO_UDP &&
2211 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002212 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002213 sizeof(key->tp.src)) ||
2214 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
Or Gerlitzaa72d702016-09-15 15:28:22 +03002215 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
Jiri Pirko77b99002015-05-12 14:56:21 +02002216 sizeof(key->tp.dst))))
2217 goto nla_put_failure;
Simon Horman5976c5f2016-11-03 13:24:21 +01002218 else if (key->basic.ip_proto == IPPROTO_SCTP &&
2219 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2220 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2221 sizeof(key->tp.src)) ||
2222 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2223 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2224 sizeof(key->tp.dst))))
2225 goto nla_put_failure;
Simon Horman7b684882016-12-07 13:48:28 +01002226 else if (key->basic.n_proto == htons(ETH_P_IP) &&
2227 key->basic.ip_proto == IPPROTO_ICMP &&
2228 (fl_dump_key_val(skb, &key->icmp.type,
2229 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2230 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2231 sizeof(key->icmp.type)) ||
2232 fl_dump_key_val(skb, &key->icmp.code,
2233 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2234 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2235 sizeof(key->icmp.code))))
2236 goto nla_put_failure;
2237 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2238 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2239 (fl_dump_key_val(skb, &key->icmp.type,
2240 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2241 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2242 sizeof(key->icmp.type)) ||
2243 fl_dump_key_val(skb, &key->icmp.code,
2244 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2245 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2246 sizeof(key->icmp.code))))
2247 goto nla_put_failure;
Simon Horman99d31322017-01-11 14:05:43 +01002248 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2249 key->basic.n_proto == htons(ETH_P_RARP)) &&
2250 (fl_dump_key_val(skb, &key->arp.sip,
2251 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2252 TCA_FLOWER_KEY_ARP_SIP_MASK,
2253 sizeof(key->arp.sip)) ||
2254 fl_dump_key_val(skb, &key->arp.tip,
2255 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2256 TCA_FLOWER_KEY_ARP_TIP_MASK,
2257 sizeof(key->arp.tip)) ||
2258 fl_dump_key_val(skb, &key->arp.op,
2259 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2260 TCA_FLOWER_KEY_ARP_OP_MASK,
2261 sizeof(key->arp.op)) ||
2262 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2263 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2264 sizeof(key->arp.sha)) ||
2265 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2266 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2267 sizeof(key->arp.tha))))
2268 goto nla_put_failure;
Jiri Pirko77b99002015-05-12 14:56:21 +02002269
Amritha Nambiar5c722992018-11-12 16:15:55 -08002270 if ((key->basic.ip_proto == IPPROTO_TCP ||
2271 key->basic.ip_proto == IPPROTO_UDP ||
2272 key->basic.ip_proto == IPPROTO_SCTP) &&
2273 fl_dump_key_port_range(skb, key, mask))
2274 goto nla_put_failure;
2275
Amir Vadaibc3103f2016-09-08 16:23:47 +03002276 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2277 (fl_dump_key_val(skb, &key->enc_ipv4.src,
2278 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2279 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2280 sizeof(key->enc_ipv4.src)) ||
2281 fl_dump_key_val(skb, &key->enc_ipv4.dst,
2282 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2283 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2284 sizeof(key->enc_ipv4.dst))))
2285 goto nla_put_failure;
2286 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2287 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2288 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2289 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2290 sizeof(key->enc_ipv6.src)) ||
2291 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2292 TCA_FLOWER_KEY_ENC_IPV6_DST,
2293 &mask->enc_ipv6.dst,
2294 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2295 sizeof(key->enc_ipv6.dst))))
2296 goto nla_put_failure;
2297
2298 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
Hadar Hen Zioneb523f42016-09-27 11:21:18 +03002299 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
Hadar Hen Zionf4d997f2016-11-07 15:14:39 +02002300 sizeof(key->enc_key_id)) ||
2301 fl_dump_key_val(skb, &key->enc_tp.src,
2302 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2303 &mask->enc_tp.src,
2304 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2305 sizeof(key->enc_tp.src)) ||
2306 fl_dump_key_val(skb, &key->enc_tp.dst,
2307 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2308 &mask->enc_tp.dst,
2309 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
Or Gerlitz0e2c17b2018-07-17 19:27:18 +03002310 sizeof(key->enc_tp.dst)) ||
Pieter Jansen van Vuuren0a6e7772018-08-07 17:36:01 +02002311 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2312 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
Amir Vadaibc3103f2016-09-08 16:23:47 +03002313 goto nla_put_failure;
2314
Or Gerlitzfaa3ffc2016-12-07 14:03:10 +02002315 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2316 goto nla_put_failure;
2317
Jiri Pirkof5749082018-07-23 09:23:08 +02002318 return 0;
2319
2320nla_put_failure:
2321 return -EMSGSIZE;
2322}
2323
2324static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
Vlad Buslov12db03b2019-02-11 10:55:45 +02002325 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
Jiri Pirkof5749082018-07-23 09:23:08 +02002326{
2327 struct cls_fl_filter *f = fh;
2328 struct nlattr *nest;
2329 struct fl_flow_key *key, *mask;
Vlad Buslov3d81e712019-03-21 15:17:42 +02002330 bool skip_hw;
Jiri Pirkof5749082018-07-23 09:23:08 +02002331
2332 if (!f)
2333 return skb->len;
2334
2335 t->tcm_handle = f->handle;
2336
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002337 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Jiri Pirkof5749082018-07-23 09:23:08 +02002338 if (!nest)
2339 goto nla_put_failure;
2340
Vlad Buslov3d81e712019-03-21 15:17:42 +02002341 spin_lock(&tp->lock);
2342
Jiri Pirkof5749082018-07-23 09:23:08 +02002343 if (f->res.classid &&
2344 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
Vlad Buslov3d81e712019-03-21 15:17:42 +02002345 goto nla_put_failure_locked;
Jiri Pirkof5749082018-07-23 09:23:08 +02002346
2347 key = &f->key;
2348 mask = &f->mask->key;
Vlad Buslov3d81e712019-03-21 15:17:42 +02002349 skip_hw = tc_skip_hw(f->flags);
Jiri Pirkof5749082018-07-23 09:23:08 +02002350
2351 if (fl_dump_key(skb, net, key, mask))
Vlad Buslov3d81e712019-03-21 15:17:42 +02002352 goto nla_put_failure_locked;
Jiri Pirkof5749082018-07-23 09:23:08 +02002353
Or Gerlitz749e6722017-02-16 10:31:10 +02002354 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
Vlad Buslov3d81e712019-03-21 15:17:42 +02002355 goto nla_put_failure_locked;
2356
2357 spin_unlock(&tp->lock);
2358
2359 if (!skip_hw)
Vlad Buslovc24e43d82019-03-21 15:17:43 +02002360 fl_hw_update_stats(tp, f, rtnl_held);
Amir Vadaie69985c2016-06-05 17:11:18 +03002361
Vlad Buslov86c55362018-09-07 17:22:21 +03002362 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2363 goto nla_put_failure;
2364
Jiri Pirko77b99002015-05-12 14:56:21 +02002365 if (tcf_exts_dump(skb, &f->exts))
2366 goto nla_put_failure;
2367
2368 nla_nest_end(skb, nest);
2369
2370 if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2371 goto nla_put_failure;
2372
2373 return skb->len;
2374
Vlad Buslov3d81e712019-03-21 15:17:42 +02002375nla_put_failure_locked:
2376 spin_unlock(&tp->lock);
Jiri Pirko77b99002015-05-12 14:56:21 +02002377nla_put_failure:
2378 nla_nest_cancel(skb, nest);
2379 return -1;
2380}
2381
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002382static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2383{
2384 struct fl_flow_tmplt *tmplt = tmplt_priv;
2385 struct fl_flow_key *key, *mask;
2386 struct nlattr *nest;
2387
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002388 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002389 if (!nest)
2390 goto nla_put_failure;
2391
2392 key = &tmplt->dummy_key;
2393 mask = &tmplt->mask;
2394
2395 if (fl_dump_key(skb, net, key, mask))
2396 goto nla_put_failure;
2397
2398 nla_nest_end(skb, nest);
2399
2400 return skb->len;
2401
2402nla_put_failure:
2403 nla_nest_cancel(skb, nest);
2404 return -EMSGSIZE;
2405}
2406
Cong Wang07d79fc2017-08-30 14:30:36 -07002407static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
2408{
2409 struct cls_fl_filter *f = fh;
2410
2411 if (f && f->res.classid == classid)
2412 f->res.class = cl;
2413}
2414
Jiri Pirko77b99002015-05-12 14:56:21 +02002415static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2416 .kind = "flower",
2417 .classify = fl_classify,
2418 .init = fl_init,
2419 .destroy = fl_destroy,
2420 .get = fl_get,
Vlad Buslov06177552019-03-21 15:17:35 +02002421 .put = fl_put,
Jiri Pirko77b99002015-05-12 14:56:21 +02002422 .change = fl_change,
2423 .delete = fl_delete,
2424 .walk = fl_walk,
John Hurley31533cb2018-06-25 14:30:06 -07002425 .reoffload = fl_reoffload,
Jiri Pirko77b99002015-05-12 14:56:21 +02002426 .dump = fl_dump,
Cong Wang07d79fc2017-08-30 14:30:36 -07002427 .bind_class = fl_bind_class,
Jiri Pirkob95ec7e2018-07-23 09:23:10 +02002428 .tmplt_create = fl_tmplt_create,
2429 .tmplt_destroy = fl_tmplt_destroy,
2430 .tmplt_dump = fl_tmplt_dump,
Jiri Pirko77b99002015-05-12 14:56:21 +02002431 .owner = THIS_MODULE,
Vlad Buslov92149192019-03-21 15:17:44 +02002432 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
Jiri Pirko77b99002015-05-12 14:56:21 +02002433};
2434
2435static int __init cls_fl_init(void)
2436{
2437 return register_tcf_proto_ops(&cls_fl_ops);
2438}
2439
2440static void __exit cls_fl_exit(void)
2441{
2442 unregister_tcf_proto_ops(&cls_fl_ops);
2443}
2444
2445module_init(cls_fl_init);
2446module_exit(cls_fl_exit);
2447
2448MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2449MODULE_DESCRIPTION("Flower classifier");
2450MODULE_LICENSE("GPL v2");