/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
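
/* Usage sketch (illustrative only, not part of this module): netem is
 * normally driven from userspace through the tc(8) frontend, which fills
 * in the netlink attributes parsed by netem_change() below. A typical
 * invocation, with the device name and numbers chosen purely as examples,
 * might look like:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.3% duplicate 1% reorder 25% 50%
 *
 * which requests a 100ms mean delay with 10ms jitter correlated at 25%,
 * plus random loss, duplication, and reordering as emulated below.
 */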

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64 time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
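
/* A worked example of the blend above (illustrative, numbers made up):
 * with rho scaled so that rho/2^32 is about 0.25, each output is
 *
 *	answer = 0.75 * value + 0.25 * last
 *
 * i.e. an exponentially weighted moving average of fresh uniform samples,
 * which yields a correlated but still uniformly distributed sequence.
 * rho == 0 degenerates to plain prandom_u32().
 */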
185
stephen hemminger661b7972011-02-23 13:04:21 +0000186/* loss_4state - 4-state model loss generator
187 * Generates losses according to the 4-state Markov chain adopted in
188 * the GI (General and Intuitive) loss model.
189 */
190static bool loss_4state(struct netem_sched_data *q)
191{
192 struct clgstate *clg = &q->clg;
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -0500193 u32 rnd = prandom_u32();
stephen hemminger661b7972011-02-23 13:04:21 +0000194
195 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300196 * Makes a comparison between rnd and the transition
stephen hemminger661b7972011-02-23 13:04:21 +0000197 * probabilities outgoing from the current state, then decides the
198 * next state and if the next packet has to be transmitted or lost.
199 * The four states correspond to:
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800200 * TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
201 * LOST_IN_BURST_PERIOD => isolated losses within a gap period
202 * LOST_IN_GAP_PERIOD => lost packets within a burst period
203 * TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period
stephen hemminger661b7972011-02-23 13:04:21 +0000204 */
205 switch (clg->state) {
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800206 case TX_IN_GAP_PERIOD:
stephen hemminger661b7972011-02-23 13:04:21 +0000207 if (rnd < clg->a4) {
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800208 clg->state = LOST_IN_BURST_PERIOD;
stephen hemminger661b7972011-02-23 13:04:21 +0000209 return true;
stephen hemmingerab6c27b2013-11-29 11:03:35 -0800210 } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800211 clg->state = LOST_IN_GAP_PERIOD;
stephen hemminger661b7972011-02-23 13:04:21 +0000212 return true;
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800213 } else if (clg->a1 + clg->a4 < rnd) {
214 clg->state = TX_IN_GAP_PERIOD;
215 }
stephen hemminger661b7972011-02-23 13:04:21 +0000216
217 break;
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800218 case TX_IN_BURST_PERIOD:
stephen hemminger661b7972011-02-23 13:04:21 +0000219 if (rnd < clg->a5) {
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800220 clg->state = LOST_IN_GAP_PERIOD;
stephen hemminger661b7972011-02-23 13:04:21 +0000221 return true;
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800222 } else {
223 clg->state = TX_IN_BURST_PERIOD;
224 }
stephen hemminger661b7972011-02-23 13:04:21 +0000225
226 break;
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800227 case LOST_IN_GAP_PERIOD:
stephen hemminger661b7972011-02-23 13:04:21 +0000228 if (rnd < clg->a3)
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800229 clg->state = TX_IN_BURST_PERIOD;
stephen hemminger661b7972011-02-23 13:04:21 +0000230 else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800231 clg->state = TX_IN_GAP_PERIOD;
stephen hemminger661b7972011-02-23 13:04:21 +0000232 } else if (clg->a2 + clg->a3 < rnd) {
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800233 clg->state = LOST_IN_GAP_PERIOD;
stephen hemminger661b7972011-02-23 13:04:21 +0000234 return true;
235 }
236 break;
Yang Yinglianga6e2fe12014-01-18 18:13:31 +0800237 case LOST_IN_BURST_PERIOD:
238 clg->state = TX_IN_GAP_PERIOD;
stephen hemminger661b7972011-02-23 13:04:21 +0000239 break;
240 }
241
242 return false;
243}
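
/* A compact view of the chain above (derived from the code, shown only as
 * an aid): from each state, rnd in [0, 2^32) selects the transition
 *
 *	TX_IN_GAP_PERIOD:     a4 -> LOST_IN_BURST_PERIOD (drop),
 *	                      a1 -> LOST_IN_GAP_PERIOD (drop), else stay
 *	TX_IN_BURST_PERIOD:   a5 -> LOST_IN_GAP_PERIOD (drop), else stay
 *	LOST_IN_GAP_PERIOD:   a3 -> TX_IN_BURST_PERIOD,
 *	                      a2 -> TX_IN_GAP_PERIOD, else stay (drop)
 *	LOST_IN_BURST_PERIOD: always back to TX_IN_GAP_PERIOD
 *
 * where a1..a5 are the p13/p31/p32/p14/p23 probabilities scaled to u32.
 */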
244
245/* loss_gilb_ell - Gilbert-Elliot model loss generator
246 * Generates losses according to the Gilbert-Elliot loss model or
247 * its special cases (Gilbert or Simple Gilbert)
248 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300249 * Makes a comparison between random number and the transition
stephen hemminger661b7972011-02-23 13:04:21 +0000250 * probabilities outgoing from the current state, then decides the
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300251 * next state. A second random number is extracted and the comparison
stephen hemminger661b7972011-02-23 13:04:21 +0000252 * with the loss probability of the current state decides if the next
253 * packet will be transmitted or lost.
254 */
255static bool loss_gilb_ell(struct netem_sched_data *q)
256{
257 struct clgstate *clg = &q->clg;
258
259 switch (clg->state) {
Yang Yingliangc045a732014-02-14 10:30:43 +0800260 case GOOD_STATE:
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -0500261 if (prandom_u32() < clg->a1)
Yang Yingliangc045a732014-02-14 10:30:43 +0800262 clg->state = BAD_STATE;
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -0500263 if (prandom_u32() < clg->a4)
stephen hemminger661b7972011-02-23 13:04:21 +0000264 return true;
stephen hemminger7c2781f2013-11-29 11:02:43 -0800265 break;
Yang Yingliangc045a732014-02-14 10:30:43 +0800266 case BAD_STATE:
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -0500267 if (prandom_u32() < clg->a2)
Yang Yingliangc045a732014-02-14 10:30:43 +0800268 clg->state = GOOD_STATE;
Aruna-Hewapathirane63862b52014-01-11 07:15:59 -0500269 if (prandom_u32() > clg->a3)
stephen hemminger661b7972011-02-23 13:04:21 +0000270 return true;
271 }
272
273 return false;
274}
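
/* Parameter meanings in the branches above (restating the clgstate field
 * comments, purely as an example): a1 = p, probability of entering the bad
 * state; a2 = r, probability of leaving it; a4 = 1-k, loss probability
 * while good; a3 = h, so bad-state packets are lost with probability 1-h.
 * E.g. p = 1%, r = 10%, 1-k = 0, h = 0 gives loss-free good periods and
 * loss bursts averaging ten packets in which every packet is dropped.
 */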

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * extracts a value from the 4-state Markov loss generator;
		 * true means this packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * extracts a value from the Gilbert-Elliot loss generator;
		 * true means this packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s64 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
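
/* Worked example (made-up numbers): suppose sigma = 10ms = 10,000,000ns
 * and the table entry selected by rnd is t = 8192, equal to
 * NETEM_DIST_SCALE (8192 in pkt_sched.h at the time of writing). The low
 * part contributes (sigma % 8192) * t / 8192 and the high part
 * (sigma / 8192) * t, together approximately sigma * t / NETEM_DIST_SCALE
 * = 10ms, i.e. one standard deviation above the mean. Splitting the
 * multiply this way helps avoid overflowing the 64-bit intermediate for
 * very large sigma values.
 */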

static u64 packet_len_2_sched_time(unsigned int len,
				   struct netem_sched_data *q)
{
	u64 offset;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	offset = (u64)len * NSEC_PER_SEC;
	do_div(offset, q->rate);
	return offset;
}
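
/* Example of the arithmetic above (numbers invented for illustration):
 * rate is in bytes per second, so at rate = 125000 (a 1 Mbit/s link) a
 * 1500 byte packet with no overheads costs 1500 * 10^9 / 125000 =
 * 12,000,000ns = 12ms of transmission time. With cell_size = 48 and
 * cell_overhead = 5 (roughly ATM-like framing), the same packet first
 * rounds up to 32 cells of 53 bytes = 1696 bytes before the division.
 */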

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}
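
/* Note on the comparison above: using ">=" sends equal timestamps to the
 * right subtree, so packets sharing a time_to_send dequeue in their
 * arrival order, i.e. the rbtree degrades gracefully to FIFO behaviour
 * when no jitter is configured. (An observation about the code, not a
 * documented guarantee.)
 */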

/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
 * when we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;

		skb = rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= ktime_get_ns()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
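
/* Flow summary (descriptive only): dequeue first drains packets that the
 * reorder path put at the head of sch->q, then looks at the leftmost
 * rbtree entry. A ripe entry is either handed straight to the caller or,
 * when a child qdisc is attached, pushed into it and pulled back out, so
 * the child shapes traffic *after* the emulated delay. Unripe entries arm
 * the watchdog to fire at their time_to_send.
 */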

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
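
/* Where the table comes from (background, not enforced by this code): the
 * tc frontend ships precomputed distribution files such as normal.dist
 * and pareto.dist, generated offline and passed down as the
 * TCA_NETEM_DELAY_DIST attribute, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * Each entry is a signed 16 bit sample of the inverse CDF in units of one
 * standard deviation scaled by NETEM_DIST_SCALE, which is what
 * tabledist() consumes above.
 */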

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
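
/* Layout note (describing the existing wire format, not defining it): the
 * netem TCA_OPTIONS payload is a legacy struct tc_netem_qopt followed
 * immediately by ordinary netlink attributes, which is why parse_attr()
 * skips NLA_ALIGN(sizeof(*qopt)) bytes before handing the remainder to
 * nla_parse().
 */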

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions:
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
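
/* Compatibility note (an observation, not new behaviour): latency and
 * jitter are reported twice above: as clamped psched ticks inside the
 * legacy tc_netem_qopt for old userspace, and at full nanosecond
 * resolution in the TCA_NETEM_LATENCY64/TCA_NETEM_JITTER64 attributes;
 * the same pattern covers rates beyond 32 bits via TCA_NETEM_RATE64.
 */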

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");