/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

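/* Illustrative use from userspace (not part of this file): netem is
 * normally configured through the tc(8) tool, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *
 * which maps onto the TCA_NETEM_* netlink attributes parsed below.
 */
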
struct disttable {
	u32  size;
	s16 table[0];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64		time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

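/* Note on the update above (a sketch of the math, not normative): with
 * r = rho / 2^32, the recurrence is roughly
 *	answer = (1 - r) * U + r * last
 * where U is a fresh uniform sample on [0, 2^32).  E.g. rho = 0x80000000
 * (r ~ 0.5) averages each new sample with the previous output, producing
 * positively correlated values with the same mean as the uniform source.
 */
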
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

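/* A reading aid for the state machine above (illustrative, derived from
 * the code): the probabilities a1..a5 are scaled to the full u32 range,
 * so a probability p is stored as p * 2^32.  Starting from
 * TX_IN_GAP_PERIOD, a packet is lost with overall probability a4 + a1:
 * with a4 it is an isolated loss (LOST_IN_BURST_PERIOD), with a1 it
 * enters a loss burst (LOST_IN_GAP_PERIOD).  Within a burst, a3 resumes
 * transmission while staying in the burst, and a2 ends the burst and
 * returns to the gap period.
 */
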
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

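/* Worked example for the Gilbert-Elliot generator (illustrative): with
 * p = a1 = 1% (good->bad) and r = a2 = 30% (bad->good), the chain spends
 * on average 1/r ~ 3.3 consecutive packets in the bad state, i.e. loss
 * arrives in short bursts rather than independently per packet.  Per the
 * code above, a4 (1-k) is the per-packet loss probability in the good
 * state, while a3 (h) is the probability that a packet still gets
 * through in the bad state.
 */
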
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model).
		 * Extracts a value from the Markov 4-state loss generator;
		 * if it is 1, a packet is dropped and, if needed, the event
		 * is written to the kernel logs.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm.
		 * Extracts a value from the Gilbert-Elliot loss generator;
		 * if it is 1, a packet is dropped and, if needed, the event
		 * is written to the kernel logs.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

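/* A sketch of the arithmetic above: the table holds samples t of the
 * target distribution scaled by NETEM_DIST_SCALE, so the returned value
 * is approximately mu + sigma * t / NETEM_DIST_SCALE, computed in two
 * pieces (sigma % SCALE and sigma / SCALE) with rounding, to avoid both
 * overflow and truncation bias.  With a normal table, mu = 100ms and
 * sigma = 10ms, this yields delays approximating N(100ms, 10ms).
 */
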
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}

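/* Example (illustrative, assuming q->rate is expressed in bytes per
 * second as set up by tc): a 1500 byte packet at rate = 125000 B/s
 * (1 Mbit/s) occupies 1500 * 1e9 / 125000 ns = 12 ms of link time.
 * With e.g. cell_size = 48 and cell_overhead = 5 (ATM-like framing),
 * the length is first rounded up to whole cells before the division.
 */
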
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

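/* Design note (derived from the code above): the time FIFO is an rbtree
 * keyed by time_to_send, and ties descend to the right, so packets that
 * share a timestamp keep their arrival order.  This keeps insertion at
 * O(log n) even when jitter reorders the transmit times.
 */
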
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round by computing a new future slot, with a fresh
 * budget of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}

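/* Slot semantics in brief (an illustrative reading of the code): packets
 * are only released while the current slot is open; once its packet or
 * byte budget is spent, delivery pauses until slot_next.  Without a
 * distribution table, the inter-slot delay is uniform on
 * [min_delay, max_delay] via the (prandom_u32() * range) >> 32 scaling
 * above.  This approximates media that deliver in bursts, e.g. Wi-Fi
 * aggregation or scheduled uplink grants.
 */
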
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

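/* Background (illustrative, not part of the kernel ABI): the tables are
 * generated in userspace; iproute2 ships precomputed normal, pareto and
 * paretonormal tables that tc passes down as the TCA_NETEM_DELAY_DIST
 * (or TCA_NETEM_SLOT_DIST) attribute, one s16 sample per entry, scaled
 * by NETEM_DIST_SCALE.
 */
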
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case they were
	 * modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");