blob: 8cffe5a27007204d9dbd6fc7c17eabc75f599aca [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * The filters are packed to hash tables of key nodes
12 * with a set of 32bit key/mask pairs at every node.
13 * Nodes reference next level hash tables etc.
14 *
15 * This scheme is the best universal classifier I managed to
16 * invent; it is not super-fast, but it is not slow (provided you
17 * program it correctly), and general enough. And its relative
18 * speed grows as the number of rules becomes larger.
19 *
20 * It seems that it represents the best middle point between
21 * speed and manageability both by human and by machine.
22 *
23 * It is especially useful for link sharing combined with QoS;
24 * pure RSVP doesn't need such a general approach and can use
25 * much simpler (and faster) schemes, sort of cls_rsvp.c.
26 *
27 * JHS: We should remove the CONFIG_NET_CLS_IND from here
28 * eventually when the meta match extension is made available
29 *
30 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
31 */
32
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/types.h>
36#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/errno.h>
John Fastabend1ce87722014-09-12 20:09:16 -070039#include <linux/percpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include <linux/rtnetlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/skbuff.h>
Cong Wang7801db82014-07-17 17:34:53 -070042#include <linux/bitmap.h>
Patrick McHardy0ba48052007-07-02 22:49:07 -070043#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <net/act_api.h>
45#include <net/pkt_cls.h>
46
Eric Dumazetcc7ec452011-01-19 19:26:56 +000047struct tc_u_knode {
John Fastabend1ce87722014-09-12 20:09:16 -070048 struct tc_u_knode __rcu *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 u32 handle;
John Fastabend1ce87722014-09-12 20:09:16 -070050 struct tc_u_hnode __rcu *ht_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -070051 struct tcf_exts exts;
52#ifdef CONFIG_NET_CLS_IND
WANG Cong2519a602014-01-09 16:14:02 -080053 int ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -070054#endif
55 u8 fshift;
56 struct tcf_result res;
John Fastabend1ce87722014-09-12 20:09:16 -070057 struct tc_u_hnode __rcu *ht_down;
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#ifdef CONFIG_CLS_U32_PERF
John Fastabend459d5f62014-09-12 20:08:47 -070059 struct tc_u32_pcnt __percpu *pf;
Linus Torvalds1da177e2005-04-16 15:20:36 -070060#endif
61#ifdef CONFIG_CLS_U32_MARK
John Fastabend459d5f62014-09-12 20:08:47 -070062 u32 val;
63 u32 mask;
64 u32 __percpu *pcpu_success;
Linus Torvalds1da177e2005-04-16 15:20:36 -070065#endif
John Fastabend1ce87722014-09-12 20:09:16 -070066 struct tcf_proto *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -070067 struct tc_u32_sel sel;
John Fastabend1ce87722014-09-12 20:09:16 -070068 struct rcu_head rcu;
Linus Torvalds1da177e2005-04-16 15:20:36 -070069};
70
Eric Dumazetcc7ec452011-01-19 19:26:56 +000071struct tc_u_hnode {
John Fastabend1ce87722014-09-12 20:09:16 -070072 struct tc_u_hnode __rcu *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -070073 u32 handle;
74 u32 prio;
75 struct tc_u_common *tp_c;
76 int refcnt;
Eric Dumazetcc7ec452011-01-19 19:26:56 +000077 unsigned int divisor;
John Fastabend1ce87722014-09-12 20:09:16 -070078 struct tc_u_knode __rcu *ht[1];
79 struct rcu_head rcu;
Linus Torvalds1da177e2005-04-16 15:20:36 -070080};
81
/* Per-qdisc state shared by every u32 tcf_proto instance attached to
 * the same Qdisc (linked from tp->q->u32_node in u32_init()).
 */
struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;		/* list of all hash tables */
	struct Qdisc		*q;		/* owning qdisc */
	int			refcnt;		/* number of u32 instances sharing this */
	u32			hgenerator;	/* last auto-generated htid, see gen_new_htid() */
	struct rcu_head		rcu;		/* deferred free */
};
89
Eric Dumazetcc7ec452011-01-19 19:26:56 +000090static inline unsigned int u32_hash_fold(__be32 key,
91 const struct tc_u32_sel *sel,
92 u8 fshift)
Linus Torvalds1da177e2005-04-16 15:20:36 -070093{
Eric Dumazetcc7ec452011-01-19 19:26:56 +000094 unsigned int h = ntohl(key & sel->hmask) >> fshift;
Linus Torvalds1da177e2005-04-16 15:20:36 -070095
96 return h;
97}
98
/* Fast-path classification. Walks the hash-table hierarchy starting at
 * tp->root, matching the 32bit key/mask pairs of each key node against
 * packet words fetched with skb_header_pointer(). Link (ht_down) hops
 * push the current node on a small stack so enclosing nodes can still
 * terminate after an inner table misses. Runs under RCU BH read side.
 * Returns the action result, or -1 on no match / malformed packet.
 */
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int off;	/* header offset saved across the PUSH */
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;		/* stack depth */
	int off2 = 0;		/* pending offset computed by OFFSET/VAROFFSET */
	int sel = 0;		/* bucket index within the current table */
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		/* optional skb->mark pre-filter before the key loop */
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		/* all nkeys key/mask pairs must match for this node */
		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			/* guard against signed overflow of the offset */
			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					/* action says continue: try next node */
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH: descend into the linked table */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		/* compute the next-header offset (fixed and/or variable) */
		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;	/* round down to a 4-byte boundary */
		}
		if (n->sel.flags & TC_U32_EAT) {
			/* consume the offset: later keys are relative to it */
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP: inner table exhausted, resume at the enclosing node */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
245
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000246static struct tc_u_hnode *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
248{
249 struct tc_u_hnode *ht;
250
John Fastabend1ce87722014-09-12 20:09:16 -0700251 for (ht = rtnl_dereference(tp_c->hlist);
252 ht;
253 ht = rtnl_dereference(ht->next))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254 if (ht->handle == handle)
255 break;
256
257 return ht;
258}
259
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000260static struct tc_u_knode *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
262{
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000263 unsigned int sel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264 struct tc_u_knode *n = NULL;
265
266 sel = TC_U32_HASH(handle);
267 if (sel > ht->divisor)
268 goto out;
269
John Fastabend1ce87722014-09-12 20:09:16 -0700270 for (n = rtnl_dereference(ht->ht[sel]);
271 n;
272 n = rtnl_dereference(n->next))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273 if (n->handle == handle)
274 break;
275out:
276 return n;
277}
278
279
280static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
281{
282 struct tc_u_hnode *ht;
283 struct tc_u_common *tp_c = tp->data;
284
285 if (TC_U32_HTID(handle) == TC_U32_ROOT)
John Fastabend1ce87722014-09-12 20:09:16 -0700286 ht = rtnl_dereference(tp->root);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287 else
288 ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
289
290 if (!ht)
291 return 0;
292
293 if (TC_U32_KEY(handle) == 0)
294 return (unsigned long)ht;
295
296 return (unsigned long)u32_lookup_key(ht, handle);
297}
298
/* Release a reference obtained via u32_get(). u32 does no per-element
 * reference counting — pointers stay valid under RTNL — so this is a
 * deliberate no-op.
 */
static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}
302
303static u32 gen_new_htid(struct tc_u_common *tp_c)
304{
305 int i = 0x800;
306
John Fastabend1ce87722014-09-12 20:09:16 -0700307 /* hgenerator only used inside rtnl lock it is safe to increment
308 * without read _copy_ update semantics
309 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310 do {
311 if (++tp_c->hgenerator == 0x7FF)
312 tp_c->hgenerator = 1;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000313 } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314
315 return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
316}
317
/* Instantiate per-classifier state: the root hash table and, for the
 * first u32 instance on this qdisc, the shared tc_u_common. Publish
 * order matters — the hnode is fully initialized before being linked
 * into tp_c->hlist / tp->root with rcu_assign_pointer().
 * Returns 0 or -ENOBUFS.
 */
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	/* all u32 instances on one qdisc share a tc_u_common */
	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;	/* root table has a single bucket */
	root_ht->refcnt++;
	/* NOTE(review): gen_new_htid() can return 0 on exhaustion; the
	 * result is not checked here — TODO confirm this is acceptable.
	 */
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}
353
354static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
355{
356 tcf_unbind_filter(tp, &n->res);
357 tcf_exts_destroy(tp, &n->exts);
358 if (n->ht_down)
359 n->ht_down->refcnt--;
360#ifdef CONFIG_CLS_U32_PERF
John Fastabend459d5f62014-09-12 20:08:47 -0700361 free_percpu(n->pf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362#endif
363 kfree(n);
364 return 0;
365}
366
John Fastabend1ce87722014-09-12 20:09:16 -0700367static void u32_delete_key_rcu(struct rcu_head *rcu)
368{
369 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
370
371 u32_destroy_key(key->tp, key);
372}
373
Yang Yingliang82d567c2013-12-10 20:55:31 +0800374static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375{
John Fastabend1ce87722014-09-12 20:09:16 -0700376 struct tc_u_knode __rcu **kp;
377 struct tc_u_knode *pkp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 struct tc_u_hnode *ht = key->ht_up;
379
380 if (ht) {
John Fastabend1ce87722014-09-12 20:09:16 -0700381 kp = &ht->ht[TC_U32_HASH(key->handle)];
382 for (pkp = rtnl_dereference(*kp); pkp;
383 kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
384 if (pkp == key) {
385 RCU_INIT_POINTER(*kp, key->next);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386
John Fastabend1ce87722014-09-12 20:09:16 -0700387 call_rcu(&key->rcu, u32_delete_key_rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 return 0;
389 }
390 }
391 }
Ilpo Järvinen547b7922008-07-25 21:43:18 -0700392 WARN_ON(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393 return 0;
394}
395
/* Unlink every key node from every bucket of @ht and schedule their
 * RCU frees. Caller holds RTNL; readers may still traverse the nodes
 * until a grace period elapses, hence call_rcu() per node.
 */
static void u32_clear_hnode(struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	/* divisor is bucket count minus one, so the loop is inclusive */
	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			call_rcu(&n->rcu, u32_delete_key_rcu);
		}
	}
}
409
/* Destroy hash table @ht: clear its key nodes, unlink it from the
 * per-qdisc table list and free it after a grace period. Caller holds
 * RTNL and must have dropped the refcount to zero already (warned on).
 * Returns 0, or -ENOENT if the table is not on the list.
 */
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(ht);

	/* walk the list by link pointer so we can splice @ht out */
	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}
433
/* Tear down one u32 classifier instance. Drops the root table, and —
 * when this was the last instance sharing tp_c — clears and frees all
 * remaining hash tables and the shared state. Caller holds RTNL.
 */
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		/* first pass: drop refs and empty every table; the
		 * tables themselves stay linked so u32_clear_hnode()
		 * can run on each.
		 */
		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next)) {
			ht->refcnt--;
			u32_clear_hnode(ht);
		}

		/* second pass: unlink and free the now-empty tables */
		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			RCU_INIT_POINTER(tp_c->hlist, ht->next);
			kfree_rcu(ht, rcu);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}
466
467static int u32_delete(struct tcf_proto *tp, unsigned long arg)
468{
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000469 struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
John Fastabend1ce87722014-09-12 20:09:16 -0700470 struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
472 if (ht == NULL)
473 return 0;
474
475 if (TC_U32_KEY(ht->handle))
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000476 return u32_delete_key(tp, (struct tc_u_knode *)ht);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477
John Fastabend1ce87722014-09-12 20:09:16 -0700478 if (root_ht == ht)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 return -EINVAL;
480
Jarek Poplawskie56cfad2008-04-12 18:37:13 -0700481 if (ht->refcnt == 1) {
482 ht->refcnt--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483 u32_destroy_hnode(tp, ht);
Jarek Poplawskie56cfad2008-04-12 18:37:13 -0700484 } else {
485 return -EBUSY;
486 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487
488 return 0;
489}
490
Cong Wang7801db82014-07-17 17:34:53 -0700491#define NR_U32_NODE (1<<12)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
493{
494 struct tc_u_knode *n;
Cong Wang7801db82014-07-17 17:34:53 -0700495 unsigned long i;
496 unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
497 GFP_KERNEL);
498 if (!bitmap)
499 return handle | 0xFFF;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500
John Fastabend1ce87722014-09-12 20:09:16 -0700501 for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
502 n;
503 n = rtnl_dereference(n->next))
Cong Wang7801db82014-07-17 17:34:53 -0700504 set_bit(TC_U32_NODE(n->handle), bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505
Cong Wang7801db82014-07-17 17:34:53 -0700506 i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
507 if (i >= NR_U32_NODE)
508 i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
509
510 kfree(bitmap);
511 return handle | (i >= NR_U32_NODE ? 0xFFF : i);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512}
513
/* Netlink attribute validation policy for TCA_U32_* attributes,
 * enforced by nla_parse_nested() in u32_change().
 */
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};
523
/* Apply the common changeable attributes — link to a lower table,
 * classid binding, ingress device, actions — to key node @n. Called
 * under RTNL from u32_change(). Returns 0 or -errno; on error the
 * temporarily validated actions are destroyed, though attributes
 * applied before the failing one remain in effect.
 */
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;
	struct tcf_exts e;

	/* validate actions into a scratch tcf_exts first so failure
	 * leaves n->exts untouched
	 */
	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		/* a link target must be a table, not a key node */
		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		/* take the new ref before dropping the old one */
		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			goto errout;
		n->ifindex = ret;
	}
#endif
	/* commit the validated actions into the node */
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
580
Benjamin LaHaisec1b52732013-01-14 05:15:39 +0000581static int u32_change(struct net *net, struct sk_buff *in_skb,
Eric W. Biedermanaf4c6642012-05-25 13:42:45 -0600582 struct tcf_proto *tp, unsigned long base, u32 handle,
Patrick McHardyadd93b62008-01-22 22:11:33 -0800583 struct nlattr **tca,
Cong Wang2f7ef2f2014-04-25 13:54:06 -0700584 unsigned long *arg, bool ovr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585{
586 struct tc_u_common *tp_c = tp->data;
587 struct tc_u_hnode *ht;
588 struct tc_u_knode *n;
589 struct tc_u32_sel *s;
Patrick McHardyadd93b62008-01-22 22:11:33 -0800590 struct nlattr *opt = tca[TCA_OPTIONS];
591 struct nlattr *tb[TCA_U32_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 u32 htid;
593 int err;
John Fastabend459d5f62014-09-12 20:08:47 -0700594#ifdef CONFIG_CLS_U32_PERF
595 size_t size;
596#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597
598 if (opt == NULL)
599 return handle ? -EINVAL : 0;
600
Patrick McHardy6fa8c012008-01-23 20:36:12 -0800601 err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
Patrick McHardycee63722008-01-23 20:33:32 -0800602 if (err < 0)
603 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000605 n = (struct tc_u_knode *)*arg;
606 if (n) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 if (TC_U32_KEY(n->handle) == 0)
608 return -EINVAL;
609
Benjamin LaHaisec1b52732013-01-14 05:15:39 +0000610 return u32_set_parms(net, tp, base, n->ht_up, n, tb,
Cong Wang2f7ef2f2014-04-25 13:54:06 -0700611 tca[TCA_RATE], ovr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 }
613
Patrick McHardyadd93b62008-01-22 22:11:33 -0800614 if (tb[TCA_U32_DIVISOR]) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000615 unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616
617 if (--divisor > 0x100)
618 return -EINVAL;
619 if (TC_U32_KEY(handle))
620 return -EINVAL;
621 if (handle == 0) {
622 handle = gen_new_htid(tp->data);
623 if (handle == 0)
624 return -ENOMEM;
625 }
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000626 ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627 if (ht == NULL)
628 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 ht->tp_c = tp_c;
Jarek Poplawskie56cfad2008-04-12 18:37:13 -0700630 ht->refcnt = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 ht->divisor = divisor;
632 ht->handle = handle;
633 ht->prio = tp->prio;
John Fastabend1ce87722014-09-12 20:09:16 -0700634 RCU_INIT_POINTER(ht->next, tp_c->hlist);
635 rcu_assign_pointer(tp_c->hlist, ht);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 *arg = (unsigned long)ht;
637 return 0;
638 }
639
Patrick McHardyadd93b62008-01-22 22:11:33 -0800640 if (tb[TCA_U32_HASH]) {
Patrick McHardy1587bac2008-01-23 20:35:03 -0800641 htid = nla_get_u32(tb[TCA_U32_HASH]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642 if (TC_U32_HTID(htid) == TC_U32_ROOT) {
John Fastabend1ce87722014-09-12 20:09:16 -0700643 ht = rtnl_dereference(tp->root);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644 htid = ht->handle;
645 } else {
646 ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
647 if (ht == NULL)
648 return -EINVAL;
649 }
650 } else {
John Fastabend1ce87722014-09-12 20:09:16 -0700651 ht = rtnl_dereference(tp->root);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700652 htid = ht->handle;
653 }
654
655 if (ht->divisor < TC_U32_HASH(htid))
656 return -EINVAL;
657
658 if (handle) {
659 if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
660 return -EINVAL;
661 handle = htid | TC_U32_NODE(handle);
662 } else
663 handle = gen_new_kid(ht, htid);
664
Patrick McHardy6fa8c012008-01-23 20:36:12 -0800665 if (tb[TCA_U32_SEL] == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 return -EINVAL;
667
Patrick McHardyadd93b62008-01-22 22:11:33 -0800668 s = nla_data(tb[TCA_U32_SEL]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669
Panagiotis Issaris0da974f2006-07-21 14:51:30 -0700670 n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 if (n == NULL)
672 return -ENOBUFS;
673
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674#ifdef CONFIG_CLS_U32_PERF
John Fastabend459d5f62014-09-12 20:08:47 -0700675 size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
676 n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
677 if (!n->pf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 kfree(n);
679 return -ENOBUFS;
680 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681#endif
682
683 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
684 n->ht_up = ht;
685 n->handle = handle;
Radu Rendecb2268012007-11-10 21:54:50 -0800686 n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
WANG Cong5da57f42013-12-15 20:15:07 -0800687 tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
John Fastabend1ce87722014-09-12 20:09:16 -0700688 n->tp = tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689
690#ifdef CONFIG_CLS_U32_MARK
John Fastabend459d5f62014-09-12 20:08:47 -0700691 n->pcpu_success = alloc_percpu(u32);
692
Patrick McHardyadd93b62008-01-22 22:11:33 -0800693 if (tb[TCA_U32_MARK]) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 struct tc_u32_mark *mark;
695
Patrick McHardyadd93b62008-01-22 22:11:33 -0800696 mark = nla_data(tb[TCA_U32_MARK]);
John Fastabend459d5f62014-09-12 20:08:47 -0700697 n->val = mark->val;
698 n->mask = mark->mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699 }
700#endif
701
Cong Wang2f7ef2f2014-04-25 13:54:06 -0700702 err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 if (err == 0) {
John Fastabend1ce87722014-09-12 20:09:16 -0700704 struct tc_u_knode __rcu **ins;
705 struct tc_u_knode *pins;
706
707 ins = &ht->ht[TC_U32_HASH(handle)];
708 for (pins = rtnl_dereference(*ins); pins;
709 ins = &pins->next, pins = rtnl_dereference(*ins))
710 if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 break;
712
John Fastabend1ce87722014-09-12 20:09:16 -0700713 RCU_INIT_POINTER(n->next, pins);
714 rcu_assign_pointer(*ins, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715
716 *arg = (unsigned long)n;
717 return 0;
718 }
719#ifdef CONFIG_CLS_U32_PERF
John Fastabend1ce87722014-09-12 20:09:16 -0700720 free_percpu(n->pf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721#endif
722 kfree(n);
723 return err;
724}
725
726static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
727{
728 struct tc_u_common *tp_c = tp->data;
729 struct tc_u_hnode *ht;
730 struct tc_u_knode *n;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000731 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732
733 if (arg->stop)
734 return;
735
John Fastabend1ce87722014-09-12 20:09:16 -0700736 for (ht = rtnl_dereference(tp_c->hlist);
737 ht;
738 ht = rtnl_dereference(ht->next)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 if (ht->prio != tp->prio)
740 continue;
741 if (arg->count >= arg->skip) {
742 if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
743 arg->stop = 1;
744 return;
745 }
746 }
747 arg->count++;
748 for (h = 0; h <= ht->divisor; h++) {
John Fastabend1ce87722014-09-12 20:09:16 -0700749 for (n = rtnl_dereference(ht->ht[h]);
750 n;
751 n = rtnl_dereference(n->next)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 if (arg->count < arg->skip) {
753 arg->count++;
754 continue;
755 }
756 if (arg->fn(tp, (unsigned long)n, arg) < 0) {
757 arg->stop = 1;
758 return;
759 }
760 arg->count++;
761 }
762 }
763 }
764}
765
/* Dump one u32 object (either a hash table or a key node) as netlink
 * attributes into skb.  fh distinguishes the two: TC_U32_KEY(handle) == 0
 * means fh is a tc_u_hnode, otherwise a tc_u_knode.  Returns skb->len on
 * success, -1 (with the nest cancelled) if the skb runs out of room.
 */
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		/* fh is really a hash table; only its divisor is dumped. */
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;	/* aggregate of per-cpu counters */
		int cpu;
#endif

		/* Selector plus its variable-length key array. */
		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			/* Upper 20 bits of the handle identify the parent table. */
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		/* Only dump the mark if a value or mask was configured;
		 * the success counter is summed across all possible CPUs.
		 */
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		/* Resolve the stored ifindex back to a device name; the
		 * device may have disappeared, in which case nothing is dumped.
		 */
		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		/* Fold per-cpu hit counters into one scratch buffer for the
		 * dump.  GFP_KERNEL is fine: dumps run in process context
		 * under RTNL.
		 */
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		/* gpf is freed on both the success and the failure path. */
		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    gpf)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	/* Extension statistics are appended outside the options nest,
	 * and only for key nodes.
	 */
	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
881
/* Classifier operations registered with the TC core under kind "u32". */
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
895
/* Module init: announce the compiled-in feature set, then register the
 * classifier with the TC core.  Returns 0 or the registration error.
 */
static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info(" Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info(" input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info(" Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}
910
/* Module exit: unregister the classifier from the TC core. */
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}
915
/* Standard module hookup; GPL license is required for the symbols used. */
module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");