#ifndef __LINUX_PKT_SCHED_H
#define __LINUX_PKT_SCHED_H

#include <linux/types.h>

/* Logical priority bands not depending on specific packet scheduler.
   Every scheduler will map them to real traffic classes, if it has
   no more precise mechanism to classify packets.

   These numbers have no special meaning, though their coincidence
   with obsolete IPv6 values is not accidental :-). New IPv6 drafts
   preferred full anarchy inspired by the diffserv group.

   Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
   class; as a rule it will actually be handled with more care than
   filler or even bulk.
 */

#define TC_PRIO_BESTEFFORT		0
#define TC_PRIO_FILLER			1
#define TC_PRIO_BULK			2
#define TC_PRIO_INTERACTIVE_BULK	4
#define TC_PRIO_INTERACTIVE		6
#define TC_PRIO_CONTROL			7

#define TC_PRIO_MAX			15

/* Generic queue statistics, available for all the elements.
   Particular schedulers may also have their own private records.
 */

struct tc_stats {
	__u64	bytes;		/* Number of enqueued bytes */
	__u32	packets;	/* Number of enqueued packets */
	__u32	drops;		/* Packets dropped because of lack of resources */
	__u32	overlimits;	/* Number of throttle events when this
				 * flow goes out of allocated bandwidth */
	__u32	bps;		/* Current flow byte rate */
	__u32	pps;		/* Current flow packet rate */
	__u32	qlen;
	__u32	backlog;
};

struct tc_estimator {
	signed char	interval;
	unsigned char	ewma_log;
};

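/* Illustrative note (added; not part of the original header): with the
 * usual gen_estimator semantics, 'interval' selects a sampling period
 * of roughly 2^interval seconds (e.g. 0 -> 1 s, 3 -> 8 s), and the
 * measured rate is smoothed with an EWMA weight of 2^-ewma_log. Treat
 * this mapping as an assumption and check net/core/gen_estimator.c.
 */
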
49/* "Handles"
50 ---------
51
52 All the traffic control objects have 32bit identifiers, or "handles".
53
54 They can be considered as opaque numbers from user API viewpoint,
55 but actually they always consist of two fields: major and
56 minor numbers, which are interpreted by kernel specially,
57 that may be used by applications, though not recommended.
58
59 F.e. qdisc handles always have minor number equal to zero,
60 classes (or flows) have major equal to parent qdisc major, and
61 minor uniquely identifying class inside qdisc.
62
63 Macros to manipulate handles:
64 */

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

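/* Worked example (added for illustration; not in the original header):
 * the tc handle "1:10" is TC_H_MAKE(0x00010000U, 0x0000000AU) == 0x0001000AU;
 * TC_H_MAJ(0x0001000AU) == 0x00010000U and TC_H_MIN(0x0001000AU) == 0x0000000AU.
 */
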
#define TC_H_UNSPEC	(0U)
#define TC_H_ROOT	(0xFFFFFFFFU)
#define TC_H_INGRESS	(0xFFFFFFF1U)
#define TC_H_CLSACT	TC_H_INGRESS

#define TC_H_MIN_INGRESS	0xFFF2U
#define TC_H_MIN_EGRESS		0xFFF3U

/* Must correspond to iproute2 tc/tc_core.h "enum link_layer" */
enum tc_link_layer {
	TC_LINKLAYER_UNAWARE,	/* Indicates an old, linklayer-unaware iproute2 utility */
	TC_LINKLAYER_ETHERNET,
	TC_LINKLAYER_ATM,
};
#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */

struct tc_ratespec {
	unsigned char	cell_log;
	__u8		linklayer;	/* lower 4 bits */
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
	__u32		rate;
};

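/* Illustrative note (added; not part of the original header): 'rate'
 * is in bytes per second, so a 1 Mbit/s class is rate = 125000. In
 * common tc usage, 'overhead' is added to each packet size before the
 * rate-table lookup and 'mpu' is the minimum accounted packet size;
 * treat these conventions as assumptions.
 */
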
#define TC_RTAB_SIZE	1024

struct tc_sizespec {
	unsigned char	cell_log;
	unsigned char	size_log;
	short		cell_align;
	int		overhead;
	unsigned int	linklayer;
	unsigned int	mpu;
	unsigned int	mtu;
	unsigned int	tsize;
};

enum {
	TCA_STAB_UNSPEC,
	TCA_STAB_BASE,
	TCA_STAB_DATA,
	__TCA_STAB_MAX
};

#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)

/* FIFO section */

struct tc_fifo_qopt {
	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
};

/* PRIO section */

#define TCQ_PRIO_BANDS	16
#define TCQ_MIN_PRIO_BANDS 2

struct tc_prio_qopt {
	int	bands;			/* Number of bands */
	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
	__u8	enable_flow;		/* Enable dequeue */
};

#define TCQ_PRIO_FLOW_CONTROL 1

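/* Example (added for illustration; the values match pfifo_fast's
 * default prio2band map, treat them as an assumption for other
 * schedulers):
 *	__u8 priomap[TC_PRIO_MAX+1] =
 *		{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
 * maps TC_PRIO_BESTEFFORT (0) to band 1, TC_PRIO_BULK (2) to band 2,
 * and TC_PRIO_INTERACTIVE (6) to band 0.
 */
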
/* MULTIQ section */

struct tc_multiq_qopt {
	__u16	bands;		/* Number of bands */
	__u16	max_bands;	/* Maximum number of queues */
};

/* PLUG section */

#define TCQ_PLUG_BUFFER			0
#define TCQ_PLUG_RELEASE_ONE		1
#define TCQ_PLUG_RELEASE_INDEFINITE	2
#define TCQ_PLUG_LIMIT			3

struct tc_plug_qopt {
	/* TCQ_PLUG_BUFFER: Insert a plug into the queue and
	 *  buffer any incoming packets.
	 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
	 *  to beginning of the next plug.
	 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
	 *  Stop buffering packets until the next TCQ_PLUG_BUFFER
	 *  command is received (just act as a pass-through queue).
	 * TCQ_PLUG_LIMIT: Increase/decrease queue size.
	 */
	int	action;
	__u32	limit;
};

/* TBF section */

struct tc_tbf_qopt {
	struct tc_ratespec rate;
	struct tc_ratespec peakrate;
	__u32	limit;
	__u32	buffer;
	__u32	mtu;
};

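/* Illustrative sketch (added; not in the original header), assuming
 * the usual tc conventions: for a 1 Mbit/s bound with a 10 kB burst,
 * userspace would set rate.rate = 125000 (bytes/s), put the queue
 * limit in bytes in 'limit', and encode the 10 kB burst in 'buffer'
 * as transmission time at that rate, in internal ticks (see
 * tc_calc_xmittime() in iproute2).
 */
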
enum {
	TCA_TBF_UNSPEC,
	TCA_TBF_PARMS,
	TCA_TBF_RTAB,
	TCA_TBF_PTAB,
	TCA_TBF_RATE64,
	TCA_TBF_PRATE64,
	TCA_TBF_BURST,
	TCA_TBF_PBURST,
	TCA_TBF_PAD,
	__TCA_TBF_MAX,
};

#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)


/* TEQL section */

/* TEQL does not require any parameters */

/* SFQ section */

struct tc_sfq_qopt {
	unsigned	quantum;	/* Bytes per round allocated to flow */
	int		perturb_period;	/* Period of hash perturbation */
	__u32		limit;		/* Maximal packets in queue */
	unsigned	divisor;	/* Hash divisor */
	unsigned	flows;		/* Maximal number of flows */
};

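/* Typical values (added for illustration; tc defaults vary by version,
 * so treat these as assumptions): quantum = 1514 (one MTU-sized packet
 * per round), perturb_period = 10 (rehash every 10 seconds),
 * limit = 127 packets, divisor = 1024 hash buckets.
 */
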
struct tc_sfqred_stats {
	__u32	prob_drop;	/* Early drops, below max threshold */
	__u32	forced_drop;	/* Early drops, after max threshold */
	__u32	prob_mark;	/* Marked packets, below max threshold */
	__u32	forced_mark;	/* Marked packets, after max threshold */
	__u32	prob_mark_head;	/* Marked packets, below max threshold */
	__u32	forced_mark_head;/* Marked packets, after max threshold */
};

struct tc_sfq_qopt_v1 {
	struct tc_sfq_qopt v0;
	unsigned int	depth;		/* max number of packets per flow */
	unsigned int	headdrop;
/* SFQRED parameters */
	__u32		limit;		/* HARD maximal flow queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char	Wlog;		/* log(W) */
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min)) */
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
	__u32		max_P;		/* probability, high resolution */
/* SFQRED stats */
	struct tc_sfqred_stats stats;
};


struct tc_sfq_xstats {
	__s32		allot;
};

/* RED section */

enum {
	TCA_RED_UNSPEC,
	TCA_RED_PARMS,
	TCA_RED_STAB,
	TCA_RED_MAX_P,
	__TCA_RED_MAX,
};

#define TCA_RED_MAX (__TCA_RED_MAX - 1)

struct tc_red_qopt {
	__u32		limit;		/* HARD maximal queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char	Wlog;		/* log(W) */
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min)) */
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
#define TC_RED_ECN		1
#define TC_RED_HARDDROP		2
#define TC_RED_ADAPTATIVE	4
};

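/* Worked example (added for illustration): the average queue length is
 * an EWMA with weight W = 2^-Wlog, i.e. roughly
 *	avg = avg + W * (qlen - avg)
 * so Wlog = 9 gives W = 1/512. Plog then scales the drop probability
 * linearly from 0 at qth_min up to P_max at qth_max. This restates
 * classic RED; see include/net/red.h for the exact fixed-point math.
 */
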
struct tc_red_xstats {
	__u32	early;	/* Early drops */
	__u32	pdrop;	/* Drops due to queue limits */
	__u32	other;	/* Drops due to drop() calls */
	__u32	marked;	/* Marked packets */
};

/* GRED section */

#define MAX_DPs 16

enum {
	TCA_GRED_UNSPEC,
	TCA_GRED_PARMS,
	TCA_GRED_STAB,
	TCA_GRED_DPS,
	TCA_GRED_MAX_P,
	TCA_GRED_LIMIT,
	__TCA_GRED_MAX,
};

#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)

struct tc_gred_qopt {
	__u32		limit;		/* HARD maximal queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	__u32		DP;		/* up to 2^32 DPs */
	__u32		backlog;
	__u32		qave;
	__u32		forced;
	__u32		early;
	__u32		other;
	__u32		pdrop;
	__u8		Wlog;		/* log(W) */
	__u8		Plog;		/* log(P_max/(qth_max-qth_min)) */
	__u8		Scell_log;	/* cell size for idle damping */
	__u8		prio;		/* prio of this VQ */
	__u32		packets;
	__u32		bytesin;
};

/* gred setup */
struct tc_gred_sopt {
	__u32		DPs;
	__u32		def_DP;
	__u8		grio;
	__u8		flags;
	__u16		pad1;
};

/* CHOKe section */

enum {
	TCA_CHOKE_UNSPEC,
	TCA_CHOKE_PARMS,
	TCA_CHOKE_STAB,
	TCA_CHOKE_MAX_P,
	__TCA_CHOKE_MAX,
};

#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)

struct tc_choke_qopt {
	__u32		limit;		/* Hard queue length (packets) */
	__u32		qth_min;	/* Min average threshold (packets) */
	__u32		qth_max;	/* Max average threshold (packets) */
	unsigned char	Wlog;		/* log(W) */
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min)) */
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;		/* see RED flags */
};

struct tc_choke_xstats {
	__u32	early;		/* Early drops */
	__u32	pdrop;		/* Drops due to queue limits */
	__u32	other;		/* Drops due to drop() calls */
	__u32	marked;		/* Marked packets */
	__u32	matched;	/* Drops due to flow match */
};

/* HTB section */
#define TC_HTB_NUMPRIO		8
#define TC_HTB_MAXDEPTH		8
#define TC_HTB_PROTOVER		3 /* the same as HTB and TC's major */

struct tc_htb_opt {
	struct tc_ratespec	rate;
	struct tc_ratespec	ceil;
	__u32	buffer;
	__u32	cbuffer;
	__u32	quantum;
	__u32	level;		/* out only */
	__u32	prio;
};
struct tc_htb_glob {
	__u32 version;		/* to match HTB/TC */
	__u32 rate2quantum;	/* bps->quantum divisor */
	__u32 defcls;		/* default class number */
	__u32 debug;		/* debug flags */

	/* stats */
	__u32 direct_pkts;	/* count of non shaped packets */
};
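/* Illustrative note (added; not in the original header): when a class
 * has no explicit quantum, HTB derives it as
 *	quantum = rate / rate2quantum	(rate in bytes/s)
 * e.g. rate = 1250000 (10 Mbit/s) with rate2quantum = 10 yields a
 * 125000-byte quantum. Treat the default divisor as an assumption;
 * see net/sched/sch_htb.c.
 */
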
enum {
	TCA_HTB_UNSPEC,
	TCA_HTB_PARMS,
	TCA_HTB_INIT,
	TCA_HTB_CTAB,
	TCA_HTB_RTAB,
	TCA_HTB_DIRECT_QLEN,
	TCA_HTB_RATE64,
	TCA_HTB_CEIL64,
	TCA_HTB_PAD,
	__TCA_HTB_MAX,
};

#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)

struct tc_htb_xstats {
	__u32 lends;
	__u32 borrows;
	__u32 giants;	/* too big packets (rate will not be accurate) */
	__u32 tokens;
	__u32 ctokens;
};

/* HFSC section */

struct tc_hfsc_qopt {
	__u16	defcls;	/* default class */
};

struct tc_service_curve {
	__u32	m1;	/* slope of the first segment in bps */
	__u32	d;	/* x-projection of the first segment in us */
	__u32	m2;	/* slope of the second segment in bps */
};

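/* Illustrative example (added; not part of the original header): a
 * concave service curve giving a flow a fast start could be
 *	m1 = 250000, d = 20000, m2 = 125000
 * i.e. 250 kB/s for the first 20 ms of backlog, 125 kB/s afterwards.
 * Whether m1/m2 are carried as bytes/s or bits/s here follows the
 * userspace convention; treat the units as an assumption.
 */
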
struct tc_hfsc_stats {
	__u64	work;	/* total work done */
	__u64	rtwork;	/* work done by real-time criteria */
	__u32	period;	/* current period */
	__u32	level;	/* class level in hierarchy */
};

enum {
	TCA_HFSC_UNSPEC,
	TCA_HFSC_RSC,
	TCA_HFSC_FSC,
	TCA_HFSC_USC,
	__TCA_HFSC_MAX,
};

#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)


/* CBQ section */

#define TC_CBQ_MAXPRIO		8
#define TC_CBQ_MAXLEVEL		8
#define TC_CBQ_DEF_EWMA		5

struct tc_cbq_lssopt {
	unsigned char	change;
	unsigned char	flags;
#define TCF_CBQ_LSS_BOUNDED	1
#define TCF_CBQ_LSS_ISOLATED	2
	unsigned char	ewma_log;
	unsigned char	level;
#define TCF_CBQ_LSS_FLAGS	1
#define TCF_CBQ_LSS_EWMA	2
#define TCF_CBQ_LSS_MAXIDLE	4
#define TCF_CBQ_LSS_MINIDLE	8
#define TCF_CBQ_LSS_OFFTIME	0x10
#define TCF_CBQ_LSS_AVPKT	0x20
	__u32		maxidle;
	__u32		minidle;
	__u32		offtime;
	__u32		avpkt;
};

struct tc_cbq_wrropt {
	unsigned char	flags;
	unsigned char	priority;
	unsigned char	cpriority;
	unsigned char	__reserved;
	__u32		allot;
	__u32		weight;
};

struct tc_cbq_ovl {
	unsigned char	strategy;
#define	TC_CBQ_OVL_CLASSIC	0
#define	TC_CBQ_OVL_DELAY	1
#define	TC_CBQ_OVL_LOWPRIO	2
#define	TC_CBQ_OVL_DROP		3
#define	TC_CBQ_OVL_RCLASSIC	4
	unsigned char	priority2;
	__u16		pad;
	__u32		penalty;
};

struct tc_cbq_police {
	unsigned char	police;
	unsigned char	__res1;
	unsigned short	__res2;
};

struct tc_cbq_fopt {
	__u32		split;
	__u32		defmap;
	__u32		defchange;
};

struct tc_cbq_xstats {
	__u32		borrows;
	__u32		overactions;
	__s32		avgidle;
	__s32		undertime;
};

enum {
	TCA_CBQ_UNSPEC,
	TCA_CBQ_LSSOPT,
	TCA_CBQ_WRROPT,
	TCA_CBQ_FOPT,
	TCA_CBQ_OVL_STRATEGY,
	TCA_CBQ_RATE,
	TCA_CBQ_RTAB,
	TCA_CBQ_POLICE,
	__TCA_CBQ_MAX,
};

#define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)

/* dsmark section */

enum {
	TCA_DSMARK_UNSPEC,
	TCA_DSMARK_INDICES,
	TCA_DSMARK_DEFAULT_INDEX,
	TCA_DSMARK_SET_TC_INDEX,
	TCA_DSMARK_MASK,
	TCA_DSMARK_VALUE,
	__TCA_DSMARK_MAX,
};

#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)

/* ATM section */

enum {
	TCA_ATM_UNSPEC,
	TCA_ATM_FD,		/* file/socket descriptor */
	TCA_ATM_PTR,		/* pointer to descriptor - later */
	TCA_ATM_HDR,		/* LL header */
	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP) */
	TCA_ATM_ADDR,		/* PVC address (for output only) */
	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
	__TCA_ATM_MAX,
};

#define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)

/* Network emulator */

enum {
	TCA_NETEM_UNSPEC,
	TCA_NETEM_CORR,
	TCA_NETEM_DELAY_DIST,
	TCA_NETEM_REORDER,
	TCA_NETEM_CORRUPT,
	TCA_NETEM_LOSS,
	TCA_NETEM_RATE,
	TCA_NETEM_ECN,
	TCA_NETEM_RATE64,
	TCA_NETEM_PAD,
	__TCA_NETEM_MAX,
};

#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)

struct tc_netem_qopt {
	__u32	latency;	/* added delay (us) */
	__u32	limit;		/* fifo limit (packets) */
	__u32	loss;		/* random packet loss (0=none ~0=100%) */
	__u32	gap;		/* re-ordering gap (0 for none) */
	__u32	duplicate;	/* random packet dup (0=none ~0=100%) */
	__u32	jitter;		/* random jitter in latency (us) */
};

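/* Illustrative note (added; not in the original header): 'loss' and
 * 'duplicate' are fixed-point fractions of UINT32_MAX, i.e. p% is
 * encoded as roughly (p / 100) * 0xFFFFFFFF, so 1% loss is about
 * 42949673.
 */
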
struct tc_netem_corr {
	__u32	delay_corr;	/* delay correlation */
	__u32	loss_corr;	/* packet loss correlation */
	__u32	dup_corr;	/* duplicate correlation */
};

struct tc_netem_reorder {
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_corrupt {
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_rate {
	__u32	rate;	/* byte/s */
	__s32	packet_overhead;
	__u32	cell_size;
	__s32	cell_overhead;
};

enum {
	NETEM_LOSS_UNSPEC,
	NETEM_LOSS_GI,	/* General Intuitive - 4 state model */
	NETEM_LOSS_GE,	/* Gilbert Elliot models */
	__NETEM_LOSS_MAX
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)

/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
	__u32	p13;
	__u32	p31;
	__u32	p32;
	__u32	p14;
	__u32	p23;
};

/* Gilbert-Elliot models */
struct tc_netem_gemodel {
	__u32 p;
	__u32 r;
	__u32 h;
	__u32 k1;
};

#define NETEM_DIST_SCALE	8192
#define NETEM_DIST_MAX		16384

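/* Illustrative note (added; not part of the original header): entries
 * of a netem delay distribution table are fixed-point values scaled by
 * NETEM_DIST_SCALE, so a table value of 8192 corresponds to one
 * standard deviation (sigma) of extra delay. This reflects the usual
 * tabledist() interpretation; treat it as an assumption and check
 * net/sched/sch_netem.c.
 */
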
/* DRR */

enum {
	TCA_DRR_UNSPEC,
	TCA_DRR_QUANTUM,
	__TCA_DRR_MAX
};

#define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)

struct tc_drr_stats {
	__u32	deficit;
};

/* MQPRIO */
#define TC_QOPT_BITMASK 15
#define TC_QOPT_MAX_QUEUE 16

struct tc_mqprio_qopt {
	__u8	num_tc;
	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
	__u8	hw;
	__u16	count[TC_QOPT_MAX_QUEUE];
	__u16	offset[TC_QOPT_MAX_QUEUE];
};

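/* Worked example (added for illustration): splitting 8 TX queues into
 * two traffic classes, priorities 0-3 -> TC 0 and 4-7 -> TC 1:
 *	num_tc = 2;
 *	prio_tc_map = { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 };
 *	count  = { 4, 4 };	offset = { 0, 4 };
 * so TC 0 uses queues 0-3 and TC 1 uses queues 4-7.
 */
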
/* SFB */

enum {
	TCA_SFB_UNSPEC,
	TCA_SFB_PARMS,
	__TCA_SFB_MAX,
};

#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)

/*
 * Note: increment, decrement are Q0.16 fixed-point values.
 */
struct tc_sfb_qopt {
	__u32 rehash_interval;	/* delay between hash move, in ms */
	__u32 warmup_time;	/* double buffering warmup time in ms (warmup_time < rehash_interval) */
	__u32 max;		/* max len of qlen_min */
	__u32 bin_size;		/* maximum queue length per bin */
	__u32 increment;	/* probability increment, (d1 in Blue) */
	__u32 decrement;	/* probability decrement, (d2 in Blue) */
	__u32 limit;		/* max SFB queue length */
	__u32 penalty_rate;	/* inelastic flows are rate limited to 'rate' pps */
	__u32 penalty_burst;
};

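/* Illustrative arithmetic (added): Q0.16 means value / 65536, so a
 * Blue increment d1 = 0.0005 would be encoded as
 * 0.0005 * 65536 ~= 33, and a decrement d2 = 0.00005 as ~3.
 */
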
struct tc_sfb_xstats {
	__u32 earlydrop;
	__u32 penaltydrop;
	__u32 bucketdrop;
	__u32 queuedrop;
	__u32 childdrop;	/* drops in child qdisc */
	__u32 marked;
	__u32 maxqlen;
	__u32 maxprob;
	__u32 avgprob;
};

#define SFB_MAX_PROB 0xFFFF

/* QFQ */
enum {
	TCA_QFQ_UNSPEC,
	TCA_QFQ_WEIGHT,
	TCA_QFQ_LMAX,
	__TCA_QFQ_MAX
};

#define TCA_QFQ_MAX	(__TCA_QFQ_MAX - 1)

struct tc_qfq_stats {
	__u32 weight;
	__u32 lmax;
};

/* CODEL */

enum {
	TCA_CODEL_UNSPEC,
	TCA_CODEL_TARGET,
	TCA_CODEL_LIMIT,
	TCA_CODEL_INTERVAL,
	TCA_CODEL_ECN,
	TCA_CODEL_CE_THRESHOLD,
	__TCA_CODEL_MAX
};

#define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)

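/* Illustrative note (added): TARGET, INTERVAL and CE_THRESHOLD are
 * __u32 microsecond values, so the classic CoDel configuration of a
 * 5 ms target within a 100 ms interval is TARGET = 5000,
 * INTERVAL = 100000.
 */
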
struct tc_codel_xstats {
	__u32	maxpacket;	/* largest packet we've seen so far */
	__u32	count;		/* how many drops we've done since the last time we
				 * entered dropping state
				 */
	__u32	lastcount;	/* count at entry to dropping state */
	__u32	ldelay;		/* in-queue delay seen by most recently dequeued packet */
	__s32	drop_next;	/* time to drop next packet */
	__u32	drop_overlimit;	/* number of times max qdisc packet limit was hit */
	__u32	ecn_mark;	/* number of packets we ECN marked instead of dropped */
	__u32	dropping;	/* are we in dropping state ? */
	__u32	ce_mark;	/* number of CE marked packets because of ce_threshold */
};

/* FQ_CODEL */

enum {
	TCA_FQ_CODEL_UNSPEC,
	TCA_FQ_CODEL_TARGET,
	TCA_FQ_CODEL_LIMIT,
	TCA_FQ_CODEL_INTERVAL,
	TCA_FQ_CODEL_ECN,
	TCA_FQ_CODEL_FLOWS,
	TCA_FQ_CODEL_QUANTUM,
	TCA_FQ_CODEL_CE_THRESHOLD,
	TCA_FQ_CODEL_DROP_BATCH_SIZE,
	TCA_FQ_CODEL_MEMORY_LIMIT,
	__TCA_FQ_CODEL_MAX
};

#define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)

enum {
	TCA_FQ_CODEL_XSTATS_QDISC,
	TCA_FQ_CODEL_XSTATS_CLASS,
};

struct tc_fq_codel_qd_stats {
	__u32	maxpacket;	/* largest packet we've seen so far */
	__u32	drop_overlimit;	/* number of times max qdisc
				 * packet limit was hit
				 */
	__u32	ecn_mark;	/* number of packets we ECN marked
				 * instead of being dropped
				 */
	__u32	new_flow_count;	/* number of times packets
				 * created a 'new flow'
				 */
	__u32	new_flows_len;	/* count of flows in new list */
	__u32	old_flows_len;	/* count of flows in old list */
	__u32	ce_mark;	/* packets above ce_threshold */
	__u32	memory_usage;	/* in bytes */
	__u32	drop_overmemory;
};

struct tc_fq_codel_cl_stats {
	__s32	deficit;
	__u32	ldelay;		/* in-queue delay seen by most recently
				 * dequeued packet
				 */
	__u32	count;
	__u32	lastcount;
	__u32	dropping;
	__s32	drop_next;
};

struct tc_fq_codel_xstats {
	__u32	type;
	union {
		struct tc_fq_codel_qd_stats qdisc_stats;
		struct tc_fq_codel_cl_stats class_stats;
	};
};

/* FQ */

enum {
	TCA_FQ_UNSPEC,

	TCA_FQ_PLIMIT,		/* limit of total number of packets in queue */

	TCA_FQ_FLOW_PLIMIT,	/* limit of packets per flow */

	TCA_FQ_QUANTUM,		/* RR quantum */

	TCA_FQ_INITIAL_QUANTUM,	/* RR quantum for new flow */

	TCA_FQ_RATE_ENABLE,	/* enable/disable rate limiting */

	TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */

	TCA_FQ_FLOW_MAX_RATE,	/* per flow max rate */

	TCA_FQ_BUCKETS_LOG,	/* log2(number of buckets) */

	TCA_FQ_FLOW_REFILL_DELAY,	/* flow credit refill delay in usec */

	TCA_FQ_ORPHAN_MASK,	/* mask applied to orphaned skb hashes */

	TCA_FQ_LOW_RATE_THRESHOLD,	/* per packet delay under this rate */

	__TCA_FQ_MAX
};

#define TCA_FQ_MAX	(__TCA_FQ_MAX - 1)

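/* Illustrative defaults (added; taken from common tc-fq documentation,
 * treat them as assumptions): PLIMIT 10000 packets, FLOW_PLIMIT 100
 * packets, QUANTUM 2 * interface MTU, INITIAL_QUANTUM 10 * MTU,
 * BUCKETS_LOG 10 (1024 buckets).
 */
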
struct tc_fq_qd_stats {
	__u64	gc_flows;
	__u64	highprio_packets;
	__u64	tcp_retrans;
	__u64	throttled;
	__u64	flows_plimit;
	__u64	pkts_too_long;
	__u64	allocation_errors;
	__s64	time_next_delayed_flow;
	__u32	flows;
	__u32	inactive_flows;
	__u32	throttled_flows;
	__u32	unthrottle_latency_ns;
};

/* Heavy-Hitter Filter */

enum {
	TCA_HHF_UNSPEC,
	TCA_HHF_BACKLOG_LIMIT,
	TCA_HHF_QUANTUM,
	TCA_HHF_HH_FLOWS_LIMIT,
	TCA_HHF_RESET_TIMEOUT,
	TCA_HHF_ADMIT_BYTES,
	TCA_HHF_EVICT_TIMEOUT,
	TCA_HHF_NON_HH_WEIGHT,
	__TCA_HHF_MAX
};

#define TCA_HHF_MAX	(__TCA_HHF_MAX - 1)

struct tc_hhf_xstats {
	__u32	drop_overlimit;	/* number of times max qdisc packet limit
				 * was hit
				 */
	__u32	hh_overlimit;	/* number of times max heavy-hitters was hit */
	__u32	hh_tot_count;	/* number of captured heavy-hitters so far */
	__u32	hh_cur_count;	/* number of current heavy-hitters */
};

/* PIE */
enum {
	TCA_PIE_UNSPEC,
	TCA_PIE_TARGET,
	TCA_PIE_LIMIT,
	TCA_PIE_TUPDATE,
	TCA_PIE_ALPHA,
	TCA_PIE_BETA,
	TCA_PIE_ECN,
	TCA_PIE_BYTEMODE,
	__TCA_PIE_MAX
};
#define TCA_PIE_MAX	(__TCA_PIE_MAX - 1)

struct tc_pie_xstats {
	__u32 prob;		/* current probability */
	__u32 delay;		/* current delay in ms */
	__u32 avg_dq_rate;	/* current average dq_rate in bits/pie_time */
	__u32 packets_in;	/* total number of packets enqueued */
	__u32 dropped;		/* packets dropped due to pie_action */
	__u32 overlimit;	/* dropped due to lack of space in queue */
	__u32 maxq;		/* maximum queue size */
	__u32 ecn_mark;		/* packets marked with ECN */
};
#endif