/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, "Dummynet for FreeBSD"

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by token bucket or
	 other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliott" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
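
/*
 * For illustration only: netem is normally driven from user space with
 * iproute2's tc. A typical session exercising the features above might
 * look like this (interface name and numbers are arbitrary examples):
 *
 *	# 100ms delay with 10ms jitter, 25% correlated with the last value
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# 0.3% random loss plus 1% duplication and 0.1% corruption
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1% corrupt 0.1%
 *
 *	# reordering: send 1 of every 5 packets immediately, delay the rest
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% 50% gap 5
 */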

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliott models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
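
/*
 * For intuition (illustrative numbers, not from the original source):
 * with rho = 0xc0000000, i.e. 75% scaled to 2^32, each output is the
 * fixed-point blend
 *
 *	answer = (value * (2^32 - (rho + 1)) + last * (rho + 1)) >> 32
 *
 * which is roughly 25% fresh randomness and 75% of the previous output,
 * so successive samples drift instead of jumping. rho == 0 degenerates
 * to plain prandom_u32().
 */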

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Compares rnd with the transition probabilities outgoing from
	 * the current state, then decides the next state and whether the
	 * next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
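
/*
 * For intuition, with hypothetical tc-level parameters such as p13 = 1%,
 * p31 = 30%, p32 = 10%, p23 = 20% and p14 = 0.2% (scaled to u32 before
 * landing in a1..a5), the chain mostly sits in TX_IN_GAP_PERIOD
 * transmitting packets, occasionally enters a burst period (states
 * TX_IN_BURST_PERIOD/LOST_IN_GAP_PERIOD) where losses cluster, and very
 * rarely emits the single isolated loss of LOST_IN_BURST_PERIOD.
 */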

/* loss_gilb_ell - Gilbert-Elliott model loss generator
 * Generates losses according to the Gilbert-Elliott loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Compares a random number with the transition probabilities outgoing
 * from the current state, then decides the next state. A second random
 * number is extracted and compared with the loss probability of the
 * current state to decide whether the next packet will be transmitted
 * or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
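
/*
 * Parameter mapping, per the struct clgstate comments: a1 = p
 * (good->bad transition), a2 = r (bad->good), a4 = 1-k (loss probability
 * while good) and a3 = h, so the loss probability while bad is 1-h.
 * Setting a4 = 0 (k = 1) disables losses in the good state, giving the
 * Gilbert model; additionally setting a3 = 0 (h = 0) loses every packet
 * in the bad state, the Simple Gilbert special case.
 */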

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * extracts a value from the Markov 4-state loss generator;
		 * a true result means this packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliott loss model algorithm:
		 * extracts a value from the Gilbert-Elliott loss generator;
		 * a true result means this packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
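
/*
 * Worked example (illustrative numbers): with mu = 100ms, sigma = 10ms
 * and no table loaded, the result is uniform in [mu - sigma, mu + sigma),
 * i.e. 90..110ms. With a table (e.g. the "normal" distribution shipped
 * with iproute2), t is a signed sample scaled by NETEM_DIST_SCALE, so the
 * return value is approximately mu + t * sigma / NETEM_DIST_SCALE,
 * reproducing the table's shape with the requested mean and deviation.
 */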

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
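
/*
 * Worked example (illustrative numbers): a 1500 byte packet at
 * rate = 125000 bytes/s (1 Mbit/s) occupies the link for
 * 1500 * NSEC_PER_SEC / 125000 = 12ms. With cell_size = 53 and
 * cell_overhead = 5 (roughly ATM-like framing), the same packet is
 * rounded up to 29 cells and charged as 29 * 58 = 1682 bytes instead.
 */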

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		skb->next = NULL;
		skb->prev = NULL;
		kfree_skb(skb);
	}
}

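/* Insert one skb into the rbtree-based tfifo, ordered by time_to_send.
 * An rbtree (rather than a plain list) keeps insertion O(log n) even when
 * jitter makes time_to_send non-monotonic across enqueued packets.
 */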
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		qdisc_qstats_drop(sch);

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						qdisc_qstats_drop(sch);
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
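
/*
 * The tables themselves are generated in user space: iproute2 ships
 * precomputed "normal", "pareto" and "paretonormal" tables (plus a
 * maketable tool for building experimental ones), which tc passes down
 * in the TCA_NETEM_DELAY_DIST attribute handled above.
 */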

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reordering probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");