/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
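
/* Example usage (illustrative only; the commands below are a hypothetical
 * sketch, not part of this file): a u32 filter is typically installed
 * from user space with the tc tool, e.g.
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *           match ip dst 192.168.0.0/16 flowid 1:1
 *
 * Each such command arrives as a netlink request handled by u32_change()
 * below; the match becomes a tc_u32_key (val/mask at a packet offset)
 * inside the tc_u32_sel that trails struct tc_u_knode.
 */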

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
        struct tc_u_knode __rcu *next;
        u32                     handle;
        struct tc_u_hnode __rcu *ht_up;
        struct tcf_exts         exts;
#ifdef CONFIG_NET_CLS_IND
        int                     ifindex;
#endif
        u8                      fshift;
        struct tcf_result       res;
        struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
        struct tc_u32_pcnt __percpu *pf;
#endif
        u32                     flags;
        unsigned int            in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
        u32                     val;
        u32                     mask;
        u32 __percpu            *pcpu_success;
#endif
        struct tcf_proto        *tp;
        struct rcu_work         rwork;
        /* The 'sel' field MUST be the last field in structure to allow for
         * tc_u32_keys allocated at end of structure.
         */
        struct tc_u32_sel       sel;
};

struct tc_u_hnode {
        struct tc_u_hnode __rcu *next;
        u32                     handle;
        u32                     prio;
        struct tc_u_common      *tp_c;
        int                     refcnt;
        unsigned int            divisor;
        struct idr              handle_idr;
        struct rcu_head         rcu;
        u32                     flags;
        /* The 'ht' field MUST be the last field in structure to allow for
         * more entries allocated at end of structure.
         */
        struct tc_u_knode __rcu *ht[1];
};

struct tc_u_common {
        struct tc_u_hnode __rcu *hlist;
        void                    *ptr;
        int                     refcnt;
        struct idr              handle_idr;
        struct hlist_node       hnode;
        struct rcu_head         rcu;
};

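/* Fold the masked 32bit key down to a hash bucket index.  fshift is
 * precomputed in u32_change() as ffs(ntohl(hmask)) - 1, the bit
 * position of the lowest set mask bit.  A worked example with
 * hypothetical values: hmask = htonl(0x00ff0000) gives fshift = 16,
 * so a key whose masked host-order value is 0x00340000 folds to
 * 0x00340000 >> 16 = 0x34, which u32_classify() then ANDs with the
 * hash table's divisor.
 */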
static inline unsigned int u32_hash_fold(__be32 key,
                                         const struct tc_u32_sel *sel,
                                         u8 fshift)
{
        unsigned int h = ntohl(key & sel->hmask) >> fshift;

        return h;
}

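/* Packet walk: starting from the root hash table, each key node's
 * sel.keys are compared against 32bit words of the packet.  On a full
 * match the node either terminates the walk (TC_U32_TERMINAL) or links
 * to a lower hash table (ht_down); the current position is PUSHed so
 * that a failed walk below can POP and resume here.  Depth is bounded
 * by TC_U32_MAXDEPTH to guard against mis-programmed link loops.
 */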
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                        struct tcf_result *res)
{
        struct {
                struct tc_u_knode *knode;
                unsigned int      off;
        } stack[TC_U32_MAXDEPTH];

        struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
        unsigned int off = skb_network_offset(skb);
        struct tc_u_knode *n;
        int sdepth = 0;
        int off2 = 0;
        int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
        int j;
#endif
        int i, r;

next_ht:
        n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
        if (n) {
                struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
                __this_cpu_inc(n->pf->rcnt);
                j = 0;
#endif

                if (tc_skip_sw(n->flags)) {
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                }

#ifdef CONFIG_CLS_U32_MARK
                if ((skb->mark & n->mask) != n->val) {
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                } else {
                        __this_cpu_inc(*n->pcpu_success);
                }
#endif

                for (i = n->sel.nkeys; i > 0; i--, key++) {
                        int toff = off + key->off + (off2 & key->offmask);
                        __be32 *data, hdata;

                        if (skb_headroom(skb) + toff > INT_MAX)
                                goto out;

                        data = skb_header_pointer(skb, toff, 4, &hdata);
                        if (!data)
                                goto out;
                        if ((*data ^ key->val) & key->mask) {
                                n = rcu_dereference_bh(n->next);
                                goto next_knode;
                        }
#ifdef CONFIG_CLS_U32_PERF
                        __this_cpu_inc(n->pf->kcnts[j]);
                        j++;
#endif
                }

                ht = rcu_dereference_bh(n->ht_down);
                if (!ht) {
check_terminal:
                        if (n->sel.flags & TC_U32_TERMINAL) {

                                *res = n->res;
#ifdef CONFIG_NET_CLS_IND
                                if (!tcf_match_indev(skb, n->ifindex)) {
                                        n = rcu_dereference_bh(n->next);
                                        goto next_knode;
                                }
#endif
#ifdef CONFIG_CLS_U32_PERF
                                __this_cpu_inc(n->pf->rhit);
#endif
                                r = tcf_exts_exec(skb, &n->exts, res);
                                if (r < 0) {
                                        n = rcu_dereference_bh(n->next);
                                        goto next_knode;
                                }

                                return r;
                        }
                        n = rcu_dereference_bh(n->next);
                        goto next_knode;
                }

                /* PUSH */
                if (sdepth >= TC_U32_MAXDEPTH)
                        goto deadloop;
                stack[sdepth].knode = n;
                stack[sdepth].off = off;
                sdepth++;

                ht = rcu_dereference_bh(n->ht_down);
                sel = 0;
                if (ht->divisor) {
                        __be32 *data, hdata;

                        data = skb_header_pointer(skb, off + n->sel.hoff, 4,
                                                  &hdata);
                        if (!data)
                                goto out;
                        sel = ht->divisor & u32_hash_fold(*data, &n->sel,
                                                          n->fshift);
                }
                if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
                        goto next_ht;

                if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
                        off2 = n->sel.off + 3;
                        if (n->sel.flags & TC_U32_VAROFFSET) {
                                __be16 *data, hdata;

                                data = skb_header_pointer(skb,
                                                          off + n->sel.offoff,
                                                          2, &hdata);
                                if (!data)
                                        goto out;
                                off2 += ntohs(n->sel.offmask & *data) >>
                                        n->sel.offshift;
                        }
                        off2 &= ~3;
                }
                if (n->sel.flags & TC_U32_EAT) {
                        off += off2;
                        off2 = 0;
                }

                if (off < skb->len)
                        goto next_ht;
        }

        /* POP */
        if (sdepth--) {
                n = stack[sdepth].knode;
                ht = rcu_dereference_bh(n->ht_up);
                off = stack[sdepth].off;
                goto check_terminal;
        }
out:
        return -1;

deadloop:
        net_warn_ratelimited("cls_u32: dead loop\n");
        return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
        struct tc_u_hnode *ht;

        for (ht = rtnl_dereference(tp_c->hlist);
             ht;
             ht = rtnl_dereference(ht->next))
                if (ht->handle == handle)
                        break;

        return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
        unsigned int sel;
        struct tc_u_knode *n = NULL;

        sel = TC_U32_HASH(handle);
        if (sel > ht->divisor)
                goto out;

        for (n = rtnl_dereference(ht->ht[sel]);
             n;
             n = rtnl_dereference(n->next))
                if (n->handle == handle)
                        break;
out:
        return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
        struct tc_u_hnode *ht;
        struct tc_u_common *tp_c = tp->data;

        if (TC_U32_HTID(handle) == TC_U32_ROOT)
                ht = rtnl_dereference(tp->root);
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

        if (!ht)
                return NULL;

        if (TC_U32_KEY(handle) == 0)
                return ht;

        return u32_lookup_key(ht, handle);
}

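/* A u32 handle decomposes as htid:bucket:node (12:8:12 bits):
 * TC_U32_HTID() masks the hash table id in bits 31..20, TC_U32_HASH()
 * extracts the bucket from bits 19..12 and TC_U32_NODE() the key node
 * id from bits 11..0; TC_U32_KEY() covers the low 20 bits, so a handle
 * whose key part is zero names the hash table itself.  For example,
 * the first htid allocated below is (1 | 0x800) << 20 = 0x80100000,
 * and a key node in its bucket 2 could be handle 0x80102001.
 */
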
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
        int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

        if (id < 0)
                return 0;
        return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
        struct tcf_block *block = tp->chain->block;

        /* The block sharing is currently supported only
         * for classless qdiscs. In that case we use block
         * for tc_u_common identification. In case the
         * block is not shared, block->q is a valid pointer
         * and we can use that. That works for classful qdiscs.
         */
        if (tcf_block_shared(block))
                return block;
        else
                return block->q;
}

static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
        return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
        struct tc_u_common *tc;
        unsigned int h;

        h = tc_u_hash(tp);
        hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
                if (tc->ptr == tc_u_common_ptr(tp))
                        return tc;
        }
        return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
        struct tc_u_hnode *root_ht;
        struct tc_u_common *tp_c;
        unsigned int h;

        tp_c = tc_u_common_find(tp);

        root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
        if (root_ht == NULL)
                return -ENOBUFS;

        root_ht->refcnt++;
        root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
        root_ht->prio = tp->prio;
        idr_init(&root_ht->handle_idr);

        if (tp_c == NULL) {
                tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
                if (tp_c == NULL) {
                        kfree(root_ht);
                        return -ENOBUFS;
                }
                tp_c->ptr = tc_u_common_ptr(tp);
                INIT_HLIST_NODE(&tp_c->hnode);
                idr_init(&tp_c->handle_idr);

                h = tc_u_hash(tp);
                hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
        }

        tp_c->refcnt++;
        RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
        rcu_assign_pointer(tp_c->hlist, root_ht);
        root_ht->tp_c = tp_c;

        rcu_assign_pointer(tp->root, root_ht);
        tp->data = tp_c;
        return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
                           bool free_pf)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

        tcf_exts_destroy(&n->exts);
        tcf_exts_put_net(&n->exts);
        if (ht && --ht->refcnt == 0)
                kfree(ht);
#ifdef CONFIG_CLS_U32_PERF
        if (free_pf)
                free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
        if (free_pf)
                free_percpu(n->pcpu_success);
#endif
        kfree(n);
        return 0;
}

/* u32_delete_key_work() should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_work() variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
        struct tc_u_knode *key = container_of(to_rcu_work(work),
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
        u32_destroy_key(key->tp, key, false);
        rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the deferred-work variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_work()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
        struct tc_u_knode *key = container_of(to_rcu_work(work),
                                              struct tc_u_knode,
                                              rwork);
        rtnl_lock();
        u32_destroy_key(key->tp, key, true);
        rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
        struct tc_u_knode __rcu **kp;
        struct tc_u_knode *pkp;
        struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

        if (ht) {
                kp = &ht->ht[TC_U32_HASH(key->handle)];
                for (pkp = rtnl_dereference(*kp); pkp;
                     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
                        if (pkp == key) {
                                RCU_INIT_POINTER(*kp, key->next);

                                tcf_unbind_filter(tp, &key->res);
                                idr_remove(&ht->handle_idr, key->handle);
                                tcf_exts_get_net(&key->exts);
                                tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
                                return 0;
                        }
                }
        }
        WARN_ON(1);
        return 0;
}

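/* Hardware offload helpers.  tc_setup_cb_call() runs the block's
 * offload callbacks and returns the number of them that accepted the
 * rule: a negative value aborts the change, a positive one marks the
 * node as resident in hardware.  For filters created with skip_sw,
 * failure to install in hardware is fatal since no software fallback
 * will ever see the packet.
 */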
static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
                               struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};

        tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
        cls_u32.command = TC_CLSU32_DELETE_HNODE;
        cls_u32.hnode.divisor = h->divisor;
        cls_u32.hnode.handle = h->handle;
        cls_u32.hnode.prio = h->prio;

        tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
                                u32 flags, struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
        bool skip_sw = tc_skip_sw(flags);
        bool offloaded = false;
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
        cls_u32.command = TC_CLSU32_NEW_HNODE;
        cls_u32.hnode.divisor = h->divisor;
        cls_u32.hnode.handle = h->handle;
        cls_u32.hnode.prio = h->prio;

        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
        if (err < 0) {
                u32_clear_hw_hnode(tp, h, NULL);
                return err;
        } else if (err > 0) {
                offloaded = true;
        }

        if (skip_sw && !offloaded)
                return -EINVAL;

        return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};

        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
        cls_u32.command = TC_CLSU32_DELETE_KNODE;
        cls_u32.knode.handle = n->handle;

        tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
        tcf_block_offload_dec(block, &n->flags);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                u32 flags, struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_u32_offload cls_u32 = {};
        bool skip_sw = tc_skip_sw(flags);
        int err;

        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
        cls_u32.command = TC_CLSU32_REPLACE_KNODE;
        cls_u32.knode.handle = n->handle;
        cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
        cls_u32.knode.val = n->val;
        cls_u32.knode.mask = n->mask;
#else
        cls_u32.knode.val = 0;
        cls_u32.knode.mask = 0;
#endif
        cls_u32.knode.sel = &n->sel;
        cls_u32.knode.exts = &n->exts;
        if (n->ht_down)
                cls_u32.knode.link_handle = ht->handle;

        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
        if (err < 0) {
                u32_remove_hw_knode(tp, n, NULL);
                return err;
        } else if (err > 0) {
                n->in_hw_count = err;
                tcf_block_offload_inc(block, &n->flags);
        }

        if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                            struct netlink_ext_ack *extack)
{
        struct tc_u_knode *n;
        unsigned int h;

        for (h = 0; h <= ht->divisor; h++) {
                while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
                        RCU_INIT_POINTER(ht->ht[h],
                                         rtnl_dereference(n->next));
                        tcf_unbind_filter(tp, &n->res);
                        u32_remove_hw_knode(tp, n, extack);
                        idr_remove(&ht->handle_idr, n->handle);
                        if (tcf_exts_get_net(&n->exts))
                                tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
                        else
                                u32_destroy_key(n->tp, n, true);
                }
        }
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                             struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode __rcu **hn;
        struct tc_u_hnode *phn;

        WARN_ON(ht->refcnt);

        u32_clear_hnode(tp, ht, extack);

        hn = &tp_c->hlist;
        for (phn = rtnl_dereference(*hn);
             phn;
             hn = &phn->next, phn = rtnl_dereference(*hn)) {
                if (phn == ht) {
                        u32_clear_hw_hnode(tp, ht, extack);
                        idr_destroy(&ht->handle_idr);
                        idr_remove(&tp_c->handle_idr, ht->handle);
                        RCU_INIT_POINTER(*hn, ht->next);
                        kfree_rcu(ht, rcu);
                        return 0;
                }
        }

        return -ENOENT;
}

static bool ht_empty(struct tc_u_hnode *ht)
{
        unsigned int h;

        for (h = 0; h <= ht->divisor; h++)
                if (rcu_access_pointer(ht->ht[h]))
                        return false;

        return true;
}

static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

        WARN_ON(root_ht == NULL);

        if (root_ht && --root_ht->refcnt == 0)
                u32_destroy_hnode(tp, root_ht, extack);

        if (--tp_c->refcnt == 0) {
                struct tc_u_hnode *ht;

                hlist_del(&tp_c->hnode);

                while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
                        u32_clear_hnode(tp, ht, extack);
                        RCU_INIT_POINTER(tp_c->hlist, ht->next);

                        /* u32_destroy_key() will later free ht for us, if it's
                         * still referenced by some knode
                         */
                        if (--ht->refcnt == 0)
                                kfree_rcu(ht, rcu);
                }

                idr_destroy(&tp_c->handle_idr);
                kfree(tp_c);
        }

        tp->data = NULL;
}

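/* Deleting a key node just unlinks that node; deleting a hash table is
 * refused for the root table and for tables still referenced by a
 * link.  *last reports whether the whole protocol instance became
 * empty so the caller can destroy it.
 */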
static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
                      struct netlink_ext_ack *extack)
{
        struct tc_u_hnode *ht = arg;
        struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
        struct tc_u_common *tp_c = tp->data;
        int ret = 0;

        if (ht == NULL)
                goto out;

        if (TC_U32_KEY(ht->handle)) {
                u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
                ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
                goto out;
        }

        if (root_ht == ht) {
                NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
                return -EINVAL;
        }

        if (ht->refcnt == 1) {
                ht->refcnt--;
                u32_destroy_hnode(tp, ht, extack);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
                return -EBUSY;
        }

out:
        *last = true;
        if (root_ht) {
                if (root_ht->refcnt > 1) {
                        *last = false;
                        goto ret;
                }
                if (root_ht->refcnt == 1) {
                        if (!ht_empty(root_ht)) {
                                *last = false;
                                goto ret;
                        }
                }
        }

        if (tp_c->refcnt > 1) {
                *last = false;
                goto ret;
        }

        if (tp_c->refcnt == 1) {
                struct tc_u_hnode *ht;

                for (ht = rtnl_dereference(tp_c->hlist);
                     ht;
                     ht = rtnl_dereference(ht->next))
                        if (!ht_empty(ht)) {
                                *last = false;
                                break;
                        }
        }

ret:
        return ret;
}

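/* Pick a node id within the given hash table's htid.  Ids from 0x800
 * up are tried first so the low ids stay available for handles chosen
 * explicitly by the user; if that range is full, fall back to the low
 * range, and as a last resort return the maximum id.
 */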
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
        u32 index = htid | 0x800;
        u32 max = htid | 0xFFF;

        if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
                index = htid + 1;
                if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
                                  GFP_KERNEL))
                        index = max;
        }

        return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
        [TCA_U32_CLASSID]       = { .type = NLA_U32 },
        [TCA_U32_HASH]          = { .type = NLA_U32 },
        [TCA_U32_LINK]          = { .type = NLA_U32 },
        [TCA_U32_DIVISOR]       = { .type = NLA_U32 },
        [TCA_U32_SEL]           = { .len = sizeof(struct tc_u32_sel) },
        [TCA_U32_INDEV]         = { .type = NLA_STRING, .len = IFNAMSIZ },
        [TCA_U32_MARK]          = { .len = sizeof(struct tc_u32_mark) },
        [TCA_U32_FLAGS]         = { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
                         unsigned long base, struct tc_u_hnode *ht,
                         struct tc_u_knode *n, struct nlattr **tb,
                         struct nlattr *est, bool ovr,
                         struct netlink_ext_ack *extack)
{
        int err;

        err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
        if (err < 0)
                return err;

        if (tb[TCA_U32_LINK]) {
                u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
                struct tc_u_hnode *ht_down = NULL, *ht_old;

                if (TC_U32_KEY(handle)) {
                        NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
                        return -EINVAL;
                }

                if (handle) {
                        ht_down = u32_lookup_ht(ht->tp_c, handle);

                        if (!ht_down) {
                                NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
                                return -EINVAL;
                        }
                        ht_down->refcnt++;
                }

                ht_old = rtnl_dereference(n->ht_down);
                rcu_assign_pointer(n->ht_down, ht_down);

                if (ht_old)
                        ht_old->refcnt--;
        }
        if (tb[TCA_U32_CLASSID]) {
                n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
                tcf_bind_filter(tp, &n->res, base);
        }

#ifdef CONFIG_NET_CLS_IND
        if (tb[TCA_U32_INDEV]) {
                int ret;

                ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
                if (ret < 0)
                        return -EINVAL;
                n->ifindex = ret;
        }
#endif
        return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
                              struct tc_u_knode *n)
{
        struct tc_u_knode __rcu **ins;
        struct tc_u_knode *pins;
        struct tc_u_hnode *ht;

        if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
                ht = rtnl_dereference(tp->root);
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

        ins = &ht->ht[TC_U32_HASH(n->handle)];

        /* The node must always exist for it to be replaced; if this is
         * not the case then something went very wrong elsewhere.
         */
        for (pins = rtnl_dereference(*ins); ;
             ins = &pins->next, pins = rtnl_dereference(*ins))
                if (pins->handle == n->handle)
                        break;

        idr_replace(&ht->handle_idr, n, n->handle);
        RCU_INIT_POINTER(n->next, pins->next);
        rcu_assign_pointer(*ins, n);
}

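/* Make a writable copy of a key node for an RCU-safe replace.  The
 * percpu counters and success statistics are deliberately shared with
 * the original so concurrent readers keep updating them while the copy
 * is swapped in; the displaced original must therefore be freed with
 * free_pf == false (see u32_delete_key_work() above).
 */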
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
                                         struct tc_u_knode *n)
{
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
        struct tc_u32_sel *s = &n->sel;
        struct tc_u_knode *new;

        new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
                      GFP_KERNEL);

        if (!new)
                return NULL;

        RCU_INIT_POINTER(new->next, n->next);
        new->handle = n->handle;
        RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
        new->ifindex = n->ifindex;
#endif
        new->fshift = n->fshift;
        new->res = n->res;
        new->flags = n->flags;
        RCU_INIT_POINTER(new->ht_down, ht);

        /* bump reference count as long as we hold pointer to structure */
        if (ht)
                ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
        /* Statistics may be incremented by readers during update
         * so we must keep them intact. When the node is later destroyed
         * a special destroy call must be made to not free the pf memory.
         */
        new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
        new->val = n->val;
        new->mask = n->mask;
        /* Similarly success statistics must be moved as pointers */
        new->pcpu_success = n->pcpu_success;
#endif
        new->tp = tp;
        memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

        if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
                kfree(new);
                return NULL;
        }

        return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
                      struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca, void **arg, bool ovr,
                      struct netlink_ext_ack *extack)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        struct tc_u32_sel *s;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_U32_MAX + 1];
        u32 htid, flags = 0;
        size_t sel_size;
        int err;
#ifdef CONFIG_CLS_U32_PERF
        size_t size;
#endif

        if (!opt) {
                if (handle) {
                        NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
        if (err < 0)
                return err;

        if (tb[TCA_U32_FLAGS]) {
                flags = nla_get_u32(tb[TCA_U32_FLAGS]);
                if (!tc_flags_valid(flags)) {
                        NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
                        return -EINVAL;
                }
        }

        n = *arg;
        if (n) {
                struct tc_u_knode *new;

                if (TC_U32_KEY(n->handle) == 0) {
                        NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
                        return -EINVAL;
                }

                if ((n->flags ^ flags) &
                    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
                        NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
                        return -EINVAL;
                }

                new = u32_init_knode(tp, n);
                if (!new)
                        return -ENOMEM;

                err = u32_set_parms(net, tp, base,
                                    rtnl_dereference(n->ht_up), new, tb,
                                    tca[TCA_RATE], ovr, extack);

                if (err) {
                        u32_destroy_key(tp, new, false);
                        return err;
                }

                err = u32_replace_hw_knode(tp, new, flags, extack);
                if (err) {
                        u32_destroy_key(tp, new, false);
                        return err;
                }

                if (!tc_in_hw(new->flags))
                        new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

                u32_replace_knode(tp, tp_c, new);
                tcf_unbind_filter(tp, &n->res);
                tcf_exts_get_net(&n->exts);
                tcf_queue_work(&n->rwork, u32_delete_key_work);
                return 0;
        }

        if (tb[TCA_U32_DIVISOR]) {
                unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

                if (--divisor > 0x100) {
                        NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
                        return -EINVAL;
                }
                if (TC_U32_KEY(handle)) {
                        NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
                        return -EINVAL;
                }
                ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
                if (ht == NULL)
                        return -ENOBUFS;
                if (handle == 0) {
                        handle = gen_new_htid(tp->data, ht);
                        if (handle == 0) {
                                kfree(ht);
                                return -ENOMEM;
                        }
                } else {
                        err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
                                            handle, GFP_KERNEL);
                        if (err) {
                                kfree(ht);
                                return err;
                        }
                }
                ht->tp_c = tp_c;
                ht->refcnt = 1;
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
                idr_init(&ht->handle_idr);
                ht->flags = flags;

                err = u32_replace_hw_hnode(tp, ht, flags, extack);
                if (err) {
                        idr_remove(&tp_c->handle_idr, handle);
                        kfree(ht);
                        return err;
                }

                RCU_INIT_POINTER(ht->next, tp_c->hlist);
                rcu_assign_pointer(tp_c->hlist, ht);
                *arg = ht;

                return 0;
        }

        if (tb[TCA_U32_HASH]) {
                htid = nla_get_u32(tb[TCA_U32_HASH]);
                if (TC_U32_HTID(htid) == TC_U32_ROOT) {
                        ht = rtnl_dereference(tp->root);
                        htid = ht->handle;
                } else {
                        ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
                        if (!ht) {
                                NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
                                return -EINVAL;
                        }
                }
        } else {
                ht = rtnl_dereference(tp->root);
                htid = ht->handle;
        }

        if (ht->divisor < TC_U32_HASH(htid)) {
                NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
                return -EINVAL;
        }

        if (handle) {
                if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
                        NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
                        return -EINVAL;
                }
                handle = htid | TC_U32_NODE(handle);
                err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
                                    GFP_KERNEL);
                if (err)
                        return err;
        } else
                handle = gen_new_kid(ht, htid);

        if (tb[TCA_U32_SEL] == NULL) {
                NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
                err = -EINVAL;
                goto erridr;
        }

        s = nla_data(tb[TCA_U32_SEL]);
        sel_size = struct_size(s, keys, s->nkeys);
        if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
                err = -EINVAL;
                goto erridr;
        }

        n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
        if (n == NULL) {
                err = -ENOBUFS;
                goto erridr;
        }

#ifdef CONFIG_CLS_U32_PERF
        size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
        n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
        if (!n->pf) {
                err = -ENOBUFS;
                goto errfree;
        }
#endif

        memcpy(&n->sel, s, sel_size);
        RCU_INIT_POINTER(n->ht_up, ht);
        n->handle = handle;
        n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
        n->flags = flags;
        n->tp = tp;

        err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
        if (err < 0)
                goto errout;

#ifdef CONFIG_CLS_U32_MARK
        n->pcpu_success = alloc_percpu(u32);
        if (!n->pcpu_success) {
                err = -ENOMEM;
                goto errout;
        }

        if (tb[TCA_U32_MARK]) {
                struct tc_u32_mark *mark;

                mark = nla_data(tb[TCA_U32_MARK]);
                n->val = mark->val;
                n->mask = mark->mask;
        }
#endif

        err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
                            extack);
        if (err == 0) {
                struct tc_u_knode __rcu **ins;
                struct tc_u_knode *pins;

                err = u32_replace_hw_knode(tp, n, flags, extack);
                if (err)
                        goto errhw;

                if (!tc_in_hw(n->flags))
                        n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

                ins = &ht->ht[TC_U32_HASH(handle)];
                for (pins = rtnl_dereference(*ins); pins;
                     ins = &pins->next, pins = rtnl_dereference(*ins))
                        if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
                                break;

                RCU_INIT_POINTER(n->next, pins);
                rcu_assign_pointer(*ins, n);
                *arg = n;
                return 0;
        }

errhw:
#ifdef CONFIG_CLS_U32_MARK
        free_percpu(n->pcpu_success);
#endif

errout:
        tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
        free_percpu(n->pf);
#endif
        kfree(n);
erridr:
        idr_remove(&ht->handle_idr, handle);
        return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

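/* Replay one hash table (hnode) to an offload callback. A hardware error
 * is propagated only when adding a skip_sw table, since such a table is
 * unusable without hardware backing; otherwise it is ignored.
 */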
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

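/* Replay one key node (knode) to an offload callback. On success, keep the
 * node's in_hw_count and NOT_IN_HW flag consistent via
 * tc_cls_offload_cnt_update(); on failure, fail hard only for skip_sw adds.
 */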
static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err) {
		if (add && tc_skip_sw(n->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);

	return 0;
}

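/* Replay the whole filter tree to a block callback being bound or unbound,
 * preserving the hnode-before-knode ordering described in the comment below.
 */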
static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

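/* Refresh a knode's cached class pointer when the class matching its
 * classid is bound or unbound by the qdisc layer.
 */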
static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}

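/* Dump one filter back to user space as nested netlink attributes. fh may
 * be either a hash table or a key node; TC_U32_KEY(handle) == 0 identifies
 * a hash table, which only reports its divisor.
 */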
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

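/* The classifier's entry points as registered with the core tc framework. */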
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};

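/* Allocate the global tc_u_common lookup hash, then register the ops;
 * the hash is freed again if registration fails.
 */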
static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
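
/*
 * Illustrative user-space usage (a sketch; the device name, handles and
 * address below are assumptions, not part of this file):
 *
 *	tc qdisc add dev eth0 root handle 1: prio
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *		match ip dst 192.168.0.1/32 flowid 1:1
 *
 * The filter command reaches u32_change() above to create a key node, and
 * "tc -s filter show dev eth0" is answered by u32_walk()/u32_dump().
 */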