/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
15
Alexey Dobriyanb7f080c2011-06-16 11:01:34 +000016#include <linux/mm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/skbuff.h>
David S. Miller78776d32011-02-24 22:48:13 -080023#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/rtnetlink.h>
Hagen Paul Pfeifer90b41a12011-12-12 14:30:00 +000025#include <linux/reciprocal_div.h>
Eric Dumazetaec0a402013-06-28 07:40:57 -070026#include <linux/rbtree.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -070028#include <net/netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <net/pkt_sched.h>
Eric Dumazete4ae0042012-04-30 23:11:05 +000030#include <net/inet_ecn.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
stephen hemminger250a65f2011-02-23 13:04:22 +000032#define VERSION "1.3"
Stephen Hemmingereb229c42005-11-03 13:49:01 -080033
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the 4-state
	Markov chain of the GI model and the 2-state "Gilbert-Elliot"
	model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

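/* For orientation only, a typical userspace invocation (syntax from the
 * tc-netem(8) man page; the option parsing lives in iproute2, not in
 * this file):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * i.e. 100ms base delay, +/-10ms jitter correlated at 25%, and 0.3%
 * uniformly random loss.
 */
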
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64		time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

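/* Worked example for get_crandom() above: with state->rho == 0x80000000
 * (~0.5 scaled to 2^32), answer ~= (value + last) / 2, i.e. each output
 * is pulled halfway toward the previous one, producing positively
 * correlated values; rho == 0 degenerates to plain prandom_u32().
 */
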
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between a random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

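/* Special cases of loss_gilb_ell() above (all probabilities arrive
 * pre-scaled to 2^32 fixed point from userspace): a4 == 0 (k == 1)
 * means no loss in the good state, the classic Gilbert model; with
 * additionally a3 == 0 (h == 0) every packet in the bad state is lost,
 * the "Simple Gilbert" variant named in the comment above.
 */
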
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

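/* A note on tabledist() above: the table entries built by userspace are
 * fixed-point samples in units of sigma / NETEM_DIST_SCALE (8192 in
 * pkt_sched.h), and the quotient/remainder split of sigma keeps the
 * intermediate product from overflowing, so the result is approximately
 * mu + t * sigma / NETEM_DIST_SCALE.
 */
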
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}

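/* A hypothetical example for packet_time_ns() above: ATM-like framing
 * could be approximated with cell_size = 48 and cell_overhead = 5, so a
 * 100 byte packet rounds up to 3 cells and is charged as
 * 3 * (48 + 5) = 159 bytes of transmit time at the configured rate.
 */
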
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

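/* The tfifo is an rbtree keyed by time_to_send; ties go to the right of
 * existing entries, so packets with equal timestamps are kept in
 * arrival order.
 */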
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

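/* Enqueue processing order, as implemented below: the random duplication
 * decision is made first, then the loss models run (or ECN marking is
 * applied instead of a drop), then corruption (segmenting GSO packets
 * first), and finally the delay/jitter/rate computation that assigns
 * time_to_send, or the reorder-to-head path.
 */
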
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now);
				 * subtract the time until it goes out (the
				 * "time bonus") from this packet's delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round by picking a new future slot, with fresh
 * packet and byte budgets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	q->slot.slot_next = now + q->slot_config.min_delay +
		(prandom_u32() *
		 (q->slot_config.max_delay -
		  q->slot_config.min_delay) >> 32);
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}

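/* In get_slot_next() above, the next slot starts at a uniformly
 * distributed offset in [min_delay, max_delay] past now: the
 * prandom_u32() * range product is 32.32 fixed point, and the >> 32
 * keeps the integer part.
 */
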
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}


static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

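/* The table loaded above is built offline by userspace; iproute2
 * historically ships pre-computed tables (normal, pareto, paretonormal,
 * experimental) plus a maketable tool for generating new ones from
 * measured delay data.
 */
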
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

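/* Slotting (configured above) approximates media that deliver packets
 * in scheduled bursts, e.g. 802.11 or DOCSIS transmit opportunities:
 * packets accumulate until the next slot opens, then drain subject to
 * the per-slot packet and byte budgets.
 */
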
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");