/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
	    Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss, using either the 4-state
	 Markov chain of the GI model or the 2-state "Gilbert-Elliot"
	 model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

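/* Illustrative userspace usage (a sketch, not part of this file): netem is
 * normally driven through iproute2's tc, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.3% duplicate 1% reorder 25% 50%
 *
 * which ends up in the netlink attributes parsed by netem_change() below.
 * The interface name and percentages here are only examples.
 */
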
struct disttable {
	u32  size;
	s16  table[0];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

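/* A worked example of the recurrence above (an illustration, not kernel API):
 * with rho = 0x80000000 (~50% correlation, as scaled by userspace tc), each
 * output is roughly (new_random + previous_output) / 2, so successive values
 * wander instead of jumping uniformly; rho = 0 degenerates to plain
 * prandom_u32(), while rho close to ~0U pins the output near its last value.
 */
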
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

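/* Mapping of the GE parameters above (a reading aid; values illustrative):
 * a1 = p is the good->bad transition probability, a2 = r the bad->good one,
 * 1-a3 = 1-h the loss probability in the bad state, and a4 = 1-k the loss
 * probability in the good state. With p = 1%, r = 30%, h = 0% and k = 100%,
 * bad periods last 1/r ~ 3.3 packets on average and every packet inside
 * them is lost, giving bursty loss of roughly p/(p+r) ~ 3% overall.
 */
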
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

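/* Sketch of the table lookup above, under the iproute2 conventions: the
 * table is an inverse CDF sampled into up to NETEM_DIST_MAX signed 16-bit
 * slots, each entry t scaled so that t/NETEM_DIST_SCALE is in units of
 * sigma. Picking a uniformly random slot and returning
 * mu + t * sigma / NETEM_DIST_SCALE therefore yields the shaped
 * distribution; e.g. t = 8192 with NETEM_DIST_SCALE = 8192 contributes
 * exactly +1 sigma on top of the configured mean delay.
 */
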
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}

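/* Example of the cell math above (an illustration; the parameters come in
 * via TCA_NETEM_RATE): an ATM-like link would use cell_size = 48 and
 * cell_overhead = 5, so a 100 byte packet occupies ceil(100/48) = 3 cells,
 * i.e. 3 * 53 = 159 "wire" bytes, before dividing by the byte rate.
 */
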
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

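/* Design note: the tfifo is an rbtree rather than a plain list because
 * random jitter makes time_to_send non-monotonic across enqueues; the tree
 * keeps packets sorted by release time at O(log n) insertion cost, and ties
 * (tnext >= existing key) go to the right so FIFO order is preserved among
 * packets scheduled for the same instant.
 */
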
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop_all(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

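/* Reordering sketch for the gap/counter logic above (an example, not new
 * configuration): "tc qdisc add dev eth0 root netem gap 5 delay 10ms"
 * lets every 5th packet bypass the delay (it is re-queued at the head)
 * while the other four take the 10ms delayed path, so the receiver sees
 * systematic reordering.
 */
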
/* Compute the time of the next slot, and refresh the packet and byte
 * budgets available within it.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}

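/* Slotting models links that deliver in bursts (e.g. Wi-Fi aggregation or
 * DOCSIS grants). A hedged usage sketch: "tc qdisc add dev eth0 root netem
 * slot 800us 10ms packets 32" holds packets until the next slot, chosen
 * uniformly 800us-10ms in the future, and releases at most 32 per slot.
 */
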
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

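/* The tables themselves are built in userspace; iproute2 ships pre-computed
 * inverse-CDF files (normal, pareto, paretonormal, typically installed
 * under /usr/lib/tc/*.dist) plus a maketable tool for experimental curves,
 * selected with e.g. "tc ... netem delay 100ms 20ms distribution normal".
 * Paths and file names here follow common distributions and may vary.
 */
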
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions;
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case
	 * q->clg and q->loss_model were modified
	 * in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");