blob: ffaddf72108ea31a3071c10788a444c409aee042 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __NET_PKT_CLS_H
2#define __NET_PKT_CLS_H
3
4#include <linux/pkt_cls.h>
5#include <net/sch_generic.h>
6#include <net/act_api.h>
7
8/* Basic packet classifier frontend definitions. */
9
/* Cursor state for walking all filters of a classifier.  @fn is invoked
 * once per filter; setting @stop aborts the walk.  @skip/@count are
 * presumably used to resume interrupted netlink dumps -- confirm
 * against the per-classifier walk() implementations.
 */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};
16
Joe Perches5c152572013-07-30 22:47:13 -070017int register_tcf_proto_ops(struct tcf_proto_ops *ops);
18int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070019
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain);
void tcf_block_put(struct tcf_block *block);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
/* Stubs used when classification support is compiled out: block setup
 * trivially succeeds and classification always reports "unclassified",
 * so qdisc code builds and runs unchanged.
 */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif
Jiri Pirkocf1facd2017-02-09 14:38:56 +010048
/* Atomically install @cl as the bound class and hand back the previous
 * binding so the caller can release it.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	unsigned long prev;

	prev = xchg(clp, cl);
	return prev;
}
54
/* Like __cls_set_class() but serialized against concurrent changes to
 * the classifier tree via tcf_tree_lock().
 */
static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long prev;

	tcf_tree_lock(tp);
	prev = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return prev;
}
67
/* Bind filter result @r to the class identified by r->classid on tp's
 * qdisc, releasing any class it was previously bound to.  @base is an
 * opaque value passed through to the qdisc's bind_tcf() operation.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		/* Drop the reference on the class we just replaced. */
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
78
79static inline void
80tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
81{
82 unsigned long cl;
83
84 if ((cl = __cls_set_class(&r->class, 0)) != 0)
85 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
86}
87
/* Actions and extension TLV mapping attached to a classifier filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* used slots in actions[] */
	struct tc_action **actions;	/* TCA_ACT_MAX_PRIO slots, see tcf_exts_init() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
100
/* Initialize @exts and, with CONFIG_NET_CLS_ACT, allocate its action
 * pointer array.  @action/@police are the classifier-specific TLV types
 * exported through the generic extensions API (0 if unsupported).
 * Returns 0 on success or -ENOMEM if the array cannot be allocated.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
115
/* Append every configured action to @actions in order.  A no-op when
 * CONFIG_NET_CLS_ACT is disabled.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int idx;

	for (idx = 0; idx < exts->nr_actions; idx++)
		list_add_tail(&exts->actions[idx]->list, actions);
#endif
}
129
/* Push hardware-gathered counters (@bytes, @packets, @lastuse) into
 * every action attached to the filter.  Runs with preemption disabled;
 * NOTE(review): presumably because action stats are per-cpu -- confirm
 * against tcf_action_stats_update().
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
148
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149/**
Jiri Pirko3bcc0ce2017-08-04 14:28:58 +0200150 * tcf_exts_has_actions - check if at least one action is present
151 * @exts: tc filter extensions handle
152 *
153 * Returns true if at least one action is present.
154 */
155static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
156{
WANG Cong2734437e2016-08-13 22:34:59 -0700157#ifdef CONFIG_NET_CLS_ACT
Jiri Pirko3bcc0ce2017-08-04 14:28:58 +0200158 return exts->nr_actions;
159#else
160 return false;
161#endif
162}
WANG Cong2734437e2016-08-13 22:34:59 -0700163
Jiri Pirko3bcc0ce2017-08-04 14:28:58 +0200164/**
165 * tcf_exts_has_one_action - check if exactly one action is present
166 * @exts: tc filter extensions handle
167 *
168 * Returns true if exactly one action is present.
169 */
170static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
171{
172#ifdef CONFIG_NET_CLS_ACT
173 return exts->nr_actions == 1;
174#else
175 return false;
176#endif
177}
WANG Cong2734437e2016-08-13 22:34:59 -0700178
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* No actions compiled in: nothing to run, accept normally. */
	return TC_ACT_OK;
}
199
Joe Perches5c152572013-07-30 22:47:13 -0700200int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
201 struct nlattr **tb, struct nlattr *rate_tlv,
Cong Wang2f7ef2f2014-04-25 13:54:06 -0700202 struct tcf_exts *exts, bool ovr);
WANG Cong18d02642014-09-25 10:26:37 -0700203void tcf_exts_destroy(struct tcf_exts *exts);
Jiri Pirko9b0d4442017-08-04 14:29:15 +0200204void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
WANG Cong5da57f42013-12-15 20:15:07 -0800205int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
206int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
Hadar Hen Zion7091d8c2016-12-01 14:06:37 +0200207int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
208 struct net_device **hw_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
/**
 * struct tcf_pkt_info - packet information
 * @ptr: pointer into the packet data, maintained during ematch evaluation
 * @nexthdr: next-header position used by ematch modules
 *	NOTE(review): exact semantics are defined by the individual
 *	ematch implementations -- confirm against em_* modules.
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
217
218#ifdef CONFIG_NET_EMATCH
219
220struct tcf_ematch_ops;
221
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace the ematch was configured in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
239
/* A container ematch has no ops of its own; it only groups a subtree. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* Nonzero if the TCF_EM_SIMPLE attribute flag is set for this match. */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* Nonzero if the match result is to be inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True if this ematch is the last one in its relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
259
260static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
261{
262 if (tcf_em_last_match(em))
263 return 1;
264
265 if (result == 0 && em->flags & TCF_EM_REL_AND)
266 return 1;
267
268 if (result != 0 && em->flags & TCF_EM_REL_OR)
269 return 1;
270
271 return 0;
272}
273
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches; @hdr.nmatches gives the element count
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
310
Joe Perches5c152572013-07-30 22:47:13 -0700311int tcf_em_register(struct tcf_ematch_ops *);
312void tcf_em_unregister(struct tcf_ematch_ops *);
313int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
314 struct tcf_ematch_tree *);
John Fastabend82a470f2014-10-05 21:27:53 -0700315void tcf_em_tree_destroy(struct tcf_ematch_tree *);
Joe Perches5c152572013-07-30 22:47:13 -0700316int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
317int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
318 struct tcf_pkt_info *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319
320/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 * tcf_em_tree_match - evaulate an ematch tree
322 *
323 * @skb: socket buffer of the packet in question
324 * @tree: ematch tree to be used for evaluation
325 * @info: packet information examined by classifier
326 *
327 * This function matches @skb against the ematch tree in @tree by going
328 * through all ematches respecting their logic relations returning
329 * as soon as the result is obvious.
330 *
331 * Returns 1 if the ematch tree as-one matches, no ematches are configured
332 * or ematch is not enabled in the kernel, otherwise 0 is returned.
333 */
334static inline int tcf_em_tree_match(struct sk_buff *skb,
335 struct tcf_ematch_tree *tree,
336 struct tcf_pkt_info *info)
337{
338 if (tree->hdr.nmatches)
339 return __tcf_em_tree_match(skb, tree, info);
340 else
341 return 1;
342}
343
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

/* Ematch support compiled out: provide an empty tree type and no-op
 * macros; tcf_em_tree_match() then always reports a match.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
357
358static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
359{
360 switch (layer) {
361 case TCF_LAYER_LINK:
362 return skb->data;
363 case TCF_LAYER_NETWORK:
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -0700364 return skb_network_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 case TCF_LAYER_TRANSPORT:
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -0700366 return skb_transport_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367 }
368
369 return NULL;
370}
371
/* Check that [ptr, ptr + len) lies entirely within skb's linear data.
 * The final comparison guards against pointer-arithmetic wraparound
 * for a large @len.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
379
#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

/* Parse a netlink attribute holding a device name into an ifindex
 * within @net.  Returns the ifindex, -EINVAL if the name does not fit
 * in IFNAMSIZ, or -ENODEV if no such device exists.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

/* Match skb's ingress ifindex against @ifindex; 0 means "any device". */
static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */
407
/* Common header carried by every classifier hardware-offload request. */
struct tc_cls_common_offload {
	u32 handle;
	u32 chain_index;
	__be16 protocol;
};

/* Fill @cls_common from classifier instance @tp: parent qdisc handle,
 * chain index and match protocol.
 */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->handle = tp->q->handle;
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
}
422
/* u32 key node (knode) description handed to drivers for offload. */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

/* u32 hash table node (hnode) description handed to drivers. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

/* Commands for u32 classifier hardware offload. */
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* Offload request for the u32 classifier; @command selects which union
 * member is valid.
 */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
457
/* Can filters on @tp be offloaded to @dev?  The device must advertise
 * NETIF_F_HW_TC and implement ndo_setup_tc; if the qdisc provides a
 * tcf_cl_offload() class op, it gets the final say for this classid.
 */
static inline bool tc_can_offload(const struct net_device *dev,
				  const struct tcf_proto *tp)
{
	const struct Qdisc *sch = tp->q;
	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;

	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	if (cops && cops->tcf_cl_offload)
		return cops->tcf_cl_offload(tp->classid);

	return true;
}
473
Hadar Hen Zion55330f02016-12-01 14:06:33 +0200474static inline bool tc_skip_hw(u32 flags)
475{
476 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
477}
478
479static inline bool tc_should_offload(const struct net_device *dev,
480 const struct tcf_proto *tp, u32 flags)
481{
482 if (tc_skip_hw(flags))
483 return false;
484 return tc_can_offload(dev, tp);
485}
486
Samudrala, Sridhard34e3e12016-05-12 17:08:23 -0700487static inline bool tc_skip_sw(u32 flags)
488{
489 return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
490}
491
492/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
493static inline bool tc_flags_valid(u32 flags)
494{
495 if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
496 return false;
497
498 if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
499 return false;
500
501 return true;
502}
503
Or Gerlitze6960282017-02-16 10:31:12 +0200504static inline bool tc_in_hw(u32 flags)
505{
506 return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
507}
508
/* Commands for flower classifier hardware offload. */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* Offload request handed to drivers for the flower classifier. */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	u32 prio;
	unsigned long cookie;	/* opaque identifier for this filter */
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	bool egress_dev;
};
526
/* Commands for matchall classifier hardware offload. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* Offload request handed to drivers for the matchall classifier. */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;	/* opaque identifier for this filter */
};

/* Commands for cls_bpf hardware offload. */
enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

/* Offload request handed to drivers for the BPF classifier. */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;	/* NOTE(review): presumably direct-action mode -- confirm in cls_bpf */
	u32 gen_flags;
};
555
Jamal Hadi Salim1045ba72017-01-24 07:02:41 -0500556
/* Opaque cookie blob passed from userspace to the kernel for actions
 * and classifiers.
 */
struct tc_cookie {
	u8  *data;	/* kernel copy of the user-supplied bytes */
	u32 len;	/* length of @data in bytes */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564#endif