#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};
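
/*
 * Illustrative sketch (not part of this header): a classifier's ->walk()
 * callback typically honours the skip/count/stop protocol like this, where
 * "f" iterates over the classifier's own filter list:
 *
 *	list_for_each_entry(f, &head->flist, link) {
 *		if (arg->count < arg->skip)
 *			goto skip;
 *		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
 *			arg->stop = 1;
 *			break;
 *		}
 * skip:
 *		arg->count++;
 *	}
 */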

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain);
void tcf_block_put(struct tcf_block *block);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif
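
/*
 * Illustrative sketch (simplified, not taken verbatim from any qdisc): a
 * classful qdisc obtains a filter block at init time, releases it in
 * ->destroy(), and runs the attached classifiers when classifying a packet.
 * "q" stands for the qdisc's private data:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 *
 * and in the classification path:
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *
 *	switch (tcf_classify(skb, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		return NULL;	(drop: no class selected)
 *	}
 */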

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
	      unsigned long cl)
{
	unsigned long old_cl;

	tcf_tree_lock(tp);
	old_cl = __cls_set_class(clp, cl);
	tcf_tree_unlock(tp);

	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
	cl = cls_set_class(tp, &r->class, cl);
	if (cl)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
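
/*
 * Illustrative sketch (not from this file): a classifier binds its result
 * to a class when the user supplies a classid in ->change(), and unbinds
 * it again when the filter is deleted:
 *
 *	if (tb[TCA_BASIC_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *	...
 *	tcf_unbind_filter(tp, &f->res);
 */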

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
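
/*
 * Illustrative sketch (patterned after how classifiers such as cls_basic
 * use this API; error handling simplified): a ->change() implementation
 * pairs tcf_exts_init() with tcf_exts_validate(), commits the result via
 * tcf_exts_change() and releases the temporary copy on failure:
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(tp, &f->exts, &e);
 */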

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}
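
/*
 * Illustrative sketch (simplified; the driver-side helper name is
 * hypothetical): offload-capable drivers commonly flatten the extensions
 * into a list and walk the actions attached to a filter:
 *
 *	LIST_HEAD(actions);
 *	struct tc_action *a;
 *
 *	tcf_exts_to_list(exts, &actions);
 *	list_for_each_entry(a, &actions, list) {
 *		if (is_tcf_gact_shot(a))
 *			drv_add_drop_rule(priv, ...);
 *	}
 */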

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
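
/*
 * Illustrative sketch: a driver handling a hardware-counter query (for
 * example the TC_CLSFLOWER_STATS command defined below) reads its counters
 * and folds them back into the filter's actions; "bytes", "packets" and
 * "lastuse" come from the hardware:
 *
 *	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
 */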

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
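
/*
 * Illustrative sketch (patterned after classifiers such as cls_basic): the
 * classify path interprets the return value exactly as documented above: a
 * negative value means "treat this filter as unmatched and try the next one":
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;
 *		return r;
 *	}
 *	return -1;
 */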

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
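
/*
 * Conceptual sketch of how the relation flags drive early termination in
 * the tree evaluation (the real loop lives in net/sched/ematch.c and also
 * handles containers and nesting):
 *
 *	match = em->ops->match(skb, em, info);
 *	if (tcf_em_is_inverted(em))
 *		match = !match;
 *	if (tcf_em_early_end(em, match))
 *		break;	(last ematch, a failed AND, or a successful OR)
 */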

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
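
/*
 * Illustrative sketch of an ematch module (roughly what modules such as
 * em_cmp do): fill in struct tcf_ematch_ops and register it from the module
 * init path. em_foo_match() is a hypothetical match callback; the kind and
 * data layout borrow the real TCF_EM_CMP values only as an example:
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind		= TCF_EM_CMP,
 *		.datalen	= sizeof(struct tcf_em_cmp),
 *		.match		= em_foo_match,
 *		.owner		= THIS_MODULE,
 *		.link		= LIST_HEAD_INIT(em_foo_ops.link)
 *	};
 *
 *	static int __init em_foo_init(void)
 *	{
 *		return tcf_em_register(&em_foo_ops);
 *	}
 *
 *	static void __exit em_foo_exit(void)
 *	{
 *		tcf_em_unregister(&em_foo_ops);
 *	}
 *
 *	MODULE_ALIAS_TCF_EMATCH(TCF_EM_CMP);
 */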

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logic relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel; otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
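
/*
 * Illustrative sketch (simplified): a classifier validates the ematch tree
 * from its netlink attributes in ->change() and evaluates it per packet in
 * ->classify(); TCA_BASIC_EMATCHES is borrowed from cls_basic purely as an
 * example attribute:
 *
 *	struct tcf_ematch_tree t;
 *	int err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t);
 *
 *	if (err < 0)
 *		return err;
 *	...
 *	tcf_em_tree_destroy(&t);
 *
 * and at classification time:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */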

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb->data;
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
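
/*
 * Illustrative sketch (roughly what ematches such as em_cmp do): resolve a
 * base pointer for the configured layer, apply the match's offset and verify
 * the access stays within the linear skb data before dereferencing:
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer) + cmp->off;
 *
 *	if (!tcf_valid_offset(skb, ptr, cmp->align))
 *		return 0;
 */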

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */
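
/*
 * Illustrative sketch (patterned after classifiers such as cls_fw): the
 * incoming-device name is resolved to an ifindex once at configuration time
 * and compared per packet afterwards:
 *
 *	if (tb[TCA_FW_INDEV]) {
 *		int ret = tcf_change_indev(net, tb[TCA_FW_INDEV]);
 *
 *		if (ret < 0)
 *			return ret;
 *		f->ifindex = ret;
 *	}
 *
 * and in the classify path:
 *
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;
 */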

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev,
				  const struct tcf_proto *tp)
{
	const struct Qdisc *sch = tp->q;
	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;

	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
	if (cops && cops->tcf_cl_offload)
		return cops->tcf_cl_offload(tp->classid);

	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_should_offload(const struct net_device *dev,
				     const struct tcf_proto *tp, u32 flags)
{
	if (tc_skip_hw(flags))
		return false;
	return tc_can_offload(dev, tp);
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
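
/*
 * Illustrative sketch (simplified from the way cls_u32 and cls_flower use
 * these helpers; the offload helper name is hypothetical): user-supplied
 * flags are validated first, the hardware path is attempted only when
 * allowed, and a SKIP_SW filter lives in hardware only:
 *
 *	if (tb[TCA_U32_FLAGS]) {
 *		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
 *		if (!tc_flags_valid(flags))
 *			return -EINVAL;
 *	}
 *	...
 *	if (tc_should_offload(dev, tp, flags))
 *		err = offload_replace_filter(tp, f, flags);
 *	if (tc_skip_sw(flags))
 *		return err;
 */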

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	enum tc_fl_command command;
	u32 prio;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};
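
/*
 * Illustrative sketch (driver side, all helper names hypothetical): a driver
 * receiving struct tc_cls_flower_offload through ndo_setup_tc() typically
 * dispatches on the command and keys its own state off the cookie:
 *
 *	switch (f->command) {
 *	case TC_CLSFLOWER_REPLACE:
 *		return foo_flower_replace(priv, f);
 *	case TC_CLSFLOWER_DESTROY:
 *		foo_flower_destroy(priv, f->cookie);
 *		return 0;
 *	case TC_CLSFLOWER_STATS:
 *		return foo_flower_stats(priv, f);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */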

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};
#endif