#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
        int     stop;
        int     skip;
        int     count;
        int     (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

#ifdef CONFIG_NET_CLS
void tcf_destroy_chain(struct tcf_proto __rcu **fl);
#else
static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
        return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct tcf_proto *tp, unsigned long *clp,
              unsigned long cl)
{
        unsigned long old_cl;

        tcf_tree_lock(tp);
        old_cl = __cls_set_class(clp, cl);
        tcf_tree_unlock(tp);

        return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
        unsigned long cl;

        cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
        cl = cls_set_class(tp, &r->class, cl);
        if (cl)
                tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
        unsigned long cl;

        if ((cl = __cls_set_class(&r->class, 0)) != 0)
                tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
}
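
/*
 * Usage sketch (illustrative only, not part of this API): a classifier's
 * change() path typically binds its result to a class when a classid is
 * supplied, and unbinds it again when the filter goes away.  "f", "tb" and
 * TCA_FOO_CLASSID below are hypothetical names.
 *
 *      if (tb[TCA_FOO_CLASSID]) {
 *              f->res.classid = nla_get_u32(tb[TCA_FOO_CLASSID]);
 *              tcf_bind_filter(tp, &f->res, base);
 *      }
 *
 *      ... and on deletion of the filter:
 *
 *      tcf_unbind_filter(tp, &f->res);
 */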

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
        __u32   type; /* for backward compat (TCA_OLD_COMPAT) */
        int nr_actions;
        struct tc_action **actions;
#endif
        /* Map to export classifier specific extension TLV types to the
         * generic extensions API. Unsupported extensions must be set to 0.
         */
        int action;
        int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
        exts->type = 0;
        exts->nr_actions = 0;
        exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
                                GFP_KERNEL);
        if (!exts->actions)
                return -ENOMEM;
#endif
        exts->action = action;
        exts->police = police;
        return 0;
}
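
/*
 * Usage sketch (illustrative only): the usual life cycle in a classifier's
 * change() path is init -> validate -> change, with destroy on the error
 * path.  TCA_FOO_ACT/TCA_FOO_POLICE and the local variables are hypothetical.
 *
 *      struct tcf_exts e;
 *      int err;
 *
 *      err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *      if (err < 0)
 *              return err;
 *      err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *      if (err < 0) {
 *              tcf_exts_destroy(&e);
 *              return err;
 *      }
 *      tcf_exts_change(tp, &f->exts, &e);
 */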

/**
 * tcf_exts_is_predicative - check if a predicative extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if a predicative extension is present, i.e. an extension which
 * might cause further actions and thus overrule the regular tcf_result.
 */
static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        return exts->nr_actions;
#else
        return 0;
#endif
}

/**
 * tcf_exts_is_available - check if at least one extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if at least one extension is present.
 */
static inline int
tcf_exts_is_available(struct tcf_exts *exts)
{
        /* All non-predicative extensions must be added here. */
        return tcf_exts_is_predicative(exts);
}

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
                                    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
        int i;

        for (i = 0; i < exts->nr_actions; i++) {
                struct tc_action *a = exts->actions[i];

                list_add_tail(&a->list, actions);
        }
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns 0 on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
              struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
        if (exts->nr_actions)
                return tcf_action_exec(skb, exts->actions, exts->nr_actions,
                                       res);
#endif
        return 0;
}
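
/*
 * Usage sketch (illustrative only): the classify() path of a hypothetical
 * "foo" classifier.  On a match it copies the stored result and lets the
 * extensions override it; a negative return from tcf_exts_exec() means
 * "treat as unmatched", a positive one is a TC_ACT_* verdict for the caller.
 * Real classifiers walk their filter lists under RCU.
 *
 *      list_for_each_entry_rcu(f, &head->flist, link) {
 *              if (!foo_match(skb, f))
 *                      continue;
 *              *res = f->res;
 *              r = tcf_exts_exec(skb, &f->exts, res);
 *              if (r < 0)
 *                      continue;
 *              return r;
 *      }
 *      return -1;
 */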

#ifdef CONFIG_NET_CLS_ACT

#define tc_no_actions(_exts)    ((_exts)->nr_actions == 0)
#define tc_single_action(_exts) ((_exts)->nr_actions == 1)

#else /* CONFIG_NET_CLS_ACT */

#define tc_no_actions(_exts)    true
#define tc_single_action(_exts) false

#endif /* CONFIG_NET_CLS_ACT */

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
                      struct nlattr **tb, struct nlattr *rate_tlv,
                      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
                     struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
                     struct net_device **hw_dev);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
        unsigned char *         ptr;
        int                     nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
        struct tcf_ematch_ops * ops;
        unsigned long           data;
        unsigned int            datalen;
        u16                     matchid;
        u16                     flags;
        struct net              *net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
        return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
        return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
        return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
        return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
        if (tcf_em_last_match(em))
                return 1;

        if (result == 0 && em->flags & TCF_EM_REL_AND)
                return 1;

        if (result != 0 && em->flags & TCF_EM_REL_OR)
                return 1;

        return 0;
}
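
/*
 * Simplified evaluation sketch (for illustration; the real walk is
 * __tcf_em_tree_match() and also handles container ematches): each ematch is
 * evaluated, the result is inverted if requested, and tcf_em_early_end()
 * decides whether the AND/OR relation already fixes the outcome.
 *
 *      for (i = 0; i < tree->hdr.nmatches; i++) {
 *              struct tcf_ematch *em = &tree->matches[i];
 *
 *              res = em->ops->match(skb, em, info);
 *              if (tcf_em_is_inverted(em))
 *                      res = !res;
 *              if (tcf_em_early_end(em, res))
 *                      break;
 *      }
 *      return res;
 */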

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
        struct tcf_ematch_tree_hdr hdr;
        struct tcf_ematch *     matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
        int                     kind;
        int                     datalen;
        int                     (*change)(struct net *net, void *,
                                          int, struct tcf_ematch *);
        int                     (*match)(struct sk_buff *, struct tcf_ematch *,
                                         struct tcf_pkt_info *);
        void                    (*destroy)(struct tcf_ematch *);
        int                     (*dump)(struct sk_buff *, struct tcf_ematch *);
        struct module           *owner;
        struct list_head        link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
                         struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
                        struct tcf_pkt_info *);

/**
 * tcf_em_tree_change - replace ematch tree of a running classifier
 *
 * @tp: classifier kind handle
 * @dst: destination ematch tree variable
 * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
 *
 * This function replaces the ematch tree in @dst with the ematch
 * tree in @src. The classifier in charge of the ematch tree may be
 * running.
 */
static inline void tcf_em_tree_change(struct tcf_proto *tp,
                                      struct tcf_ematch_tree *dst,
                                      struct tcf_ematch_tree *src)
{
        tcf_tree_lock(tp);
        memcpy(dst, src, sizeof(*dst));
        tcf_tree_unlock(tp);
}

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
                                    struct tcf_ematch_tree *tree,
                                    struct tcf_pkt_info *info)
{
        if (tree->hdr.nmatches)
                return __tcf_em_tree_match(skb, tree, info);
        else
                return 1;
}
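
/*
 * Usage sketch (illustrative only; TCA_FOO_EMATCHES and f->ematches are
 * hypothetical):
 *
 *      change() path:
 *              struct tcf_ematch_tree t;
 *
 *              err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &t);
 *              if (err < 0)
 *                      return err;
 *              tcf_em_tree_change(tp, &f->ematches, &t);
 *
 *      classify() path:
 *              if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *                      continue;
 */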

#define MODULE_ALIAS_TCF_EMATCH(kind)   MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
        switch (layer) {
                case TCF_LAYER_LINK:
                        return skb->data;
                case TCF_LAYER_NETWORK:
                        return skb_network_header(skb);
                case TCF_LAYER_TRANSPORT:
                        return skb_transport_header(skb);
        }

        return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
                                   const unsigned char *ptr, const int len)
{
        return likely((ptr + len) <= skb_tail_pointer(skb) &&
                      ptr >= skb->head &&
                      (ptr <= (ptr + len)));
}
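
/*
 * Usage sketch (illustrative only): bounds-check a load relative to a header
 * before touching packet data.  "off" and "val" are hypothetical locals.
 *
 *      unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *      if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
 *              return 0;
 *      val = get_unaligned_be32(ptr);
 */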

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
        char indev[IFNAMSIZ];
        struct net_device *dev;

        if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
                return -EINVAL;
        dev = __dev_get_by_name(net, indev);
        if (!dev)
                return -ENODEV;
        return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
        if (!ifindex)
                return true;
        if (!skb->skb_iif)
                return false;
        return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */
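
/*
 * Usage sketch (illustrative only, CONFIG_NET_CLS_IND; TCA_FOO_INDEV and
 * f->ifindex are hypothetical):
 *
 *      change() path - resolve the device name from netlink:
 *              ret = tcf_change_indev(net, tb[TCA_FOO_INDEV]);
 *              if (ret < 0)
 *                      return ret;
 *              f->ifindex = ret;
 *
 *      classify() path - compare against the receiving device:
 *              if (!tcf_match_indev(skb, f->ifindex))
 *                      continue;
 */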

struct tc_cls_u32_knode {
        struct tcf_exts *exts;
        struct tc_u32_sel *sel;
        u32 handle;
        u32 val;
        u32 mask;
        u32 link_handle;
        u8 fshift;
};

struct tc_cls_u32_hnode {
        u32 handle;
        u32 prio;
        unsigned int divisor;
};

enum tc_clsu32_command {
        TC_CLSU32_NEW_KNODE,
        TC_CLSU32_REPLACE_KNODE,
        TC_CLSU32_DELETE_KNODE,
        TC_CLSU32_NEW_HNODE,
        TC_CLSU32_REPLACE_HNODE,
        TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
        /* knode values */
        enum tc_clsu32_command command;
        union {
                struct tc_cls_u32_knode knode;
                struct tc_cls_u32_hnode hnode;
        };
};

static inline bool tc_can_offload(const struct net_device *dev,
                                  const struct tcf_proto *tp)
{
        const struct Qdisc *sch = tp->q;
        const struct Qdisc_class_ops *cops = sch->ops->cl_ops;

        if (!(dev->features & NETIF_F_HW_TC))
                return false;
        if (!dev->netdev_ops->ndo_setup_tc)
                return false;
        if (cops && cops->tcf_cl_offload)
                return cops->tcf_cl_offload(tp->classid);

        return true;
}

static inline bool tc_skip_hw(u32 flags)
{
        return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_should_offload(const struct net_device *dev,
                                     const struct tcf_proto *tp, u32 flags)
{
        if (tc_skip_hw(flags))
                return false;
        return tc_can_offload(dev, tp);
}

static inline bool tc_skip_sw(u32 flags)
{
        return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
        if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
                return false;

        if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
                return false;

        return true;
}
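
/*
 * Usage sketch (illustrative only): how a classifier typically honours the
 * SKIP_SW/SKIP_HW flags.  foo_offload_to_hw() is a hypothetical driver call.
 *
 *      if (!tc_flags_valid(flags))
 *              return -EINVAL;
 *
 *      if (tc_should_offload(dev, tp, flags)) {
 *              err = foo_offload_to_hw(dev, f);
 *              if (err && tc_skip_sw(flags))
 *                      return err;
 *      }
 */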

static inline bool tc_in_hw(u32 flags)
{
        return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

enum tc_fl_command {
        TC_CLSFLOWER_REPLACE,
        TC_CLSFLOWER_DESTROY,
        TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
        enum tc_fl_command command;
        u32 prio;
        unsigned long cookie;
        struct flow_dissector *dissector;
        struct fl_flow_key *mask;
        struct fl_flow_key *key;
        struct tcf_exts *exts;
};

enum tc_matchall_command {
        TC_CLSMATCHALL_REPLACE,
        TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
        enum tc_matchall_command command;
        struct tcf_exts *exts;
        unsigned long cookie;
};

enum tc_clsbpf_command {
        TC_CLSBPF_ADD,
        TC_CLSBPF_REPLACE,
        TC_CLSBPF_DESTROY,
        TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
        enum tc_clsbpf_command command;
        struct tcf_exts *exts;
        struct bpf_prog *prog;
        const char *name;
        bool exts_integrated;
        u32 gen_flags;
};

/* This structure holds the cookie that is passed from userspace to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
        u8  *data;
        u32 len;
};
#endif