/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

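/* Illustrative usage (added comment, not part of the original file;
 * standard tc netem syntax from iproute2):
 *
 *   tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *   tc qdisc change dev eth0 root netem loss 0.3% 25%
 *
 * The first command adds 100ms of delay with +/-10ms of jitter and 25%
 * correlation between successive delays; the second switches to 0.3%
 * random loss, where each loss decision depends 25% on the previous
 * one (see get_crandom() below).
 */
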
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};


static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return rb_entry(rb, struct sk_buff, rbnode);
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

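/* Numeric sketch of get_crandom() (added comment, not in the original
 * source): rho is a fraction of 2^32, so the update above computes
 * roughly answer = (1 - rho) * uniform + rho * last. With rho =
 * 0x80000000 (~0.5), last = 1000000 and a fresh draw of 3000000, the
 * next value is about 2000000: an exponentially weighted moving
 * average over uniform draws, which is what makes jitter "correlated".
 */
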
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

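/* Note (added comment, not in the original source): rnd and the
 * transition probabilities a1..a5 live on the same fixed-point scale,
 * where ~2^32 represents probability 1.0; userspace (iproute2's tc) is
 * expected to convert the percentages given on the command line to
 * this scale before passing them down via netlink.
 */
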
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

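/* Back-of-envelope check (added comment, not in the original source):
 * with a1 = p (good->bad), a2 = r (bad->good), a3 = h (delivery
 * probability in the bad state) and a4 = 1-k (loss probability in the
 * good state), the chain spends a fraction p/(p+r) of packets in the
 * bad state, so the long-run loss rate is roughly
 *   (r/(p+r)) * (1-k) + (p/(p+r)) * (1-h).
 */
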
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI
		 * model): a true return from the Markov 4-state loss
		 * generator means this packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm: a true return
		 * from the Gilbert-Elliot loss generator means this
		 * packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

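/* Worked example (added comment, not in the original source): table
 * entries are samples of the target distribution scaled by
 * NETEM_DIST_SCALE (8192 in pkt_sched.h), so the return value is
 * effectively mu + t * sigma / NETEM_DIST_SCALE, computed with
 * rounding and without overflowing the intermediate product. With
 * mu = 100ms, sigma = 10ms and a table entry t = 8192 (one standard
 * deviation), the result is ~110ms.
 */
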
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

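/* Worked example (added comment, not in the original source): with
 * rate = 125000 bytes/s (1 Mbit/s), no overheads and a 1500 byte
 * packet, ticks = 1500 * 1e9 / 125000 = 12,000,000 ns, i.e. the packet
 * occupies the emulated link for 12ms. With cell_size = 48 (ATM-like
 * framing) and cell_overhead = 5, a 100 byte packet rounds up to
 * 3 cells = 3 * (48 + 5) = 159 bytes before the same division.
 */
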
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

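/* Note (added comment, not in the original source): the tfifo is a
 * red-black tree keyed on time_to_send, so netem_dequeue() can always
 * pick the earliest deadline via rb_first(). Equal keys descend to the
 * right, which keeps packets with identical timestamps in FIFO order.
 */
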
/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames.
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = netem_rb_to_skb(rb_last(&q->t_root));
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * The last packet in the queue is the
				 * reference point (now); calculate the
				 * time bonus and subtract it from the delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

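/* Note (added comment, not in the original source): the table is
 * supplied by userspace; iproute2 ships pre-computed distribution
 * tables such as normal, pareto and paretonormal (the .dist files
 * loaded by tc), and tabledist() above indexes into the table to shape
 * the delay distribution.
 */
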
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions,
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

1120static const struct Qdisc_class_ops netem_class_ops = {
1121 .graft = netem_graft,
1122 .leaf = netem_leaf,
1123 .get = netem_get,
1124 .put = netem_put,
1125 .walk = netem_walk,
1126 .dump = netem_dump_class,
1127};
1128
Eric Dumazet20fea082007-11-14 01:44:41 -08001129static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 .id = "netem",
stephen hemminger10f6dfc2011-02-23 13:04:20 +00001131 .cl_ops = &netem_class_ops,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 .priv_size = sizeof(struct netem_sched_data),
1133 .enqueue = netem_enqueue,
1134 .dequeue = netem_dequeue,
Jarek Poplawski77be1552008-10-31 00:47:01 -07001135 .peek = qdisc_peek_dequeued,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136 .init = netem_init,
1137 .reset = netem_reset,
1138 .destroy = netem_destroy,
1139 .change = netem_change,
1140 .dump = netem_dump,
1141 .owner = THIS_MODULE,
1142};
1143
1144
1145static int __init netem_module_init(void)
1146{
Stephen Hemmingereb229c42005-11-03 13:49:01 -08001147 pr_info("netem: version " VERSION "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 return register_qdisc(&netem_qdisc_ops);
1149}
1150static void __exit netem_module_exit(void)
1151{
1152 unregister_qdisc(&netem_qdisc_ops);
1153}
1154module_init(netem_module_init)
1155module_exit(netem_module_exit)
1156MODULE_LICENSE("GPL");