#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
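
/*
 * Illustrative sketch (hypothetical helper, not part of this header): how a
 * classifier's ->change() handler typically (re)binds its result to a class
 * once a TCA_..._CLASSID attribute has been parsed into @classid.
 */
static inline void example_bind_classid(struct tcf_proto *tp,
					struct tcf_result *r,
					unsigned long base, u32 classid)
{
	r->classid = classid;
	tcf_bind_filter(tp, r, base);	/* releases any previously bound class */
}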

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	struct list_head actions;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	INIT_LIST_HEAD(&exts->actions);
#endif
	exts->action = action;
	exts->police = police;
}

/**
 * tcf_exts_is_predicative - check if a predicative extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if a predicative extension is present, i.e. an extension which
 * might cause further actions and thus overrule the regular tcf_result.
 */
static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return !list_empty(&exts->actions);
#else
	return 0;
#endif
}

/**
 * tcf_exts_is_available - check if at least one extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if at least one extension is present.
 */
static inline int
tcf_exts_is_available(struct tcf_exts *exts)
{
	/* All non-predicative extensions must be added here. */
	return tcf_exts_is_predicative(exts);
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns 0 on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	if (!list_empty(&exts->actions))
		return tcf_action_exec(skb, &exts->actions, res);
#endif
	return 0;
}
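
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * tail of a typical ->classify() implementation, showing how the three
 * possible outcomes of tcf_exts_exec() are handled.
 */
static inline int example_classify_tail(struct sk_buff *skb,
					struct tcf_exts *exts,
					const struct tcf_result *match,
					struct tcf_result *res)
{
	int err;

	*res = *match;			/* tentative classification */
	err = tcf_exts_exec(skb, exts, res);
	if (err < 0)
		return -1;		/* treat the filter as unmatched */
	return err;			/* 0 or a TC_ACT_* verdict */
}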

#ifdef CONFIG_NET_CLS_ACT

#define tc_no_actions(_exts) \
	(list_empty(&(_exts)->actions))

#define tc_for_each_action(_a, _exts) \
	list_for_each_entry(_a, &(_exts)->actions, list)

#define tc_single_action(_exts) \
	(list_is_singular(&(_exts)->actions))

#else /* CONFIG_NET_CLS_ACT */

#define tc_no_actions(_exts) true
#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
#define tc_single_action(_exts) false

#endif /* CONFIG_NET_CLS_ACT */

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
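
/*
 * Illustrative sketch (hypothetical, loosely modelled on how cls_u32 uses
 * these helpers): the usual extension-handling sequence in a classifier's
 * ->change() handler.  The TCA_U32_ACT/TCA_U32_POLICE attribute ids merely
 * stand in for the classifier's own TLV types.
 */
static inline int example_change_exts(struct net *net, struct tcf_proto *tp,
				      struct nlattr **tb,
				      struct nlattr *rate_tlv,
				      struct tcf_exts *old, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, rate_tlv, &e, ovr);
	if (err < 0)
		return err;

	/* swap the validated extensions in under the tree lock */
	tcf_exts_change(tp, old, &e);
	return 0;
}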

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the ematch was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_change - replace ematch tree of a running classifier
 *
 * @tp: classifier kind handle
 * @dst: destination ematch tree variable
 * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
 *
 * This function replaces the ematch tree in @dst with the ematch
 * tree in @src. The classifier in charge of the ematch tree may be
 * running.
 */
static inline void tcf_em_tree_change(struct tcf_proto *tp,
				      struct tcf_ematch_tree *dst,
				      struct tcf_ematch_tree *src)
{
	tcf_tree_lock(tp);
	memcpy(dst, src, sizeof(*dst));
	tcf_tree_unlock(tp);
}

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
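
/*
 * Illustrative sketch (hypothetical helper): how a classifier gates its own
 * match on a configured ematch tree.  Classifiers such as cls_basic pass a
 * NULL packet info because most ematches do not use it.
 */
static inline bool example_ematches_ok(struct sk_buff *skb,
				       struct tcf_ematch_tree *tree)
{
	return tcf_em_tree_match(skb, tree, NULL) != 0;
}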

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb->data;
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
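
/*
 * Illustrative sketch (hypothetical helper): bounds-checked access to packet
 * data, combining tcf_get_base_ptr() with tcf_valid_offset() before any
 * bytes at the requested offset are dereferenced.
 */
static inline unsigned char *example_header_ptr(struct sk_buff *skb,
						int layer, int off, int len)
{
	unsigned char *ptr = tcf_get_base_ptr(skb, layer);

	if (!ptr || !tcf_valid_offset(skb, ptr + off, len))
		return NULL;
	return ptr + off;
}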

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
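
/*
 * Illustrative sketch (hypothetical helper): how a u32 hash node is
 * described to a driver; actually issuing the request to the device via
 * its ndo_setup_tc() hook is omitted here.
 */
static inline void example_fill_hnode(struct tc_cls_u32_offload *u32_offload,
				      u32 handle, u32 prio,
				      unsigned int divisor)
{
	u32_offload->command = TC_CLSU32_NEW_HNODE;
	u32_offload->hnode.handle = handle;
	u32_offload->hnode.prio = prio;
	u32_offload->hnode.divisor = divisor;
}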

static inline bool tc_should_offload(const struct net_device *dev,
				     const struct tcf_proto *tp, u32 flags)
{
	const struct Qdisc *sch = tp->q;
	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;

	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (flags & TCA_CLS_FLAGS_SKIP_HW)
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	if (cops && cops->tcf_cl_offload)
		return cops->tcf_cl_offload(tp->classid);

	return true;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
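
/*
 * Illustrative sketch (hypothetical helper): the decision sequence a
 * classifier typically follows around the SKIP_SW/SKIP_HW flags when
 * installing a filter; the actual hardware programming is left out.
 */
static inline int example_install_filter(struct net_device *dev,
					 struct tcf_proto *tp, u32 flags)
{
	if (!tc_flags_valid(flags))
		return -EINVAL;

	if (tc_should_offload(dev, tp, flags)) {
		/* program the hardware here; only a hardware failure with
		 * tc_skip_sw() set would be fatal
		 */
	} else if (tc_skip_sw(flags)) {
		/* hardware-only was requested but offload is unavailable */
		return -EINVAL;
	}

	return 0;
}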

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

#endif