/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
	    Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

   ----------------------------------------------------------------

   This started out as a simple way to delay outgoing packets to
   test TCP but has grown to include most of the functionality
   of a full blown network emulator like NISTnet. It can delay
   packets and add random jitter (and correlation). The random
   distribution can be loaded from a table as well to provide
   normal, Pareto, or experimental curves. Packet loss,
   duplication, and reordering can also be emulated.

   This qdisc does not do classification; that can be handled by
   layering other disciplines. It does not need to do bandwidth
   control either, since that can be handled by using token
   bucket or other rate control.

	 Correlated Loss Generator models

   Added generation of correlated loss according to the
   "Gilbert-Elliot" model, a 4-state Markov model.

   References:
   [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
   [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
   and intuitive loss model for packet networks and its implementation
   in the Netem module in the Linux kernel", available in [1]

   Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
	    Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

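/* Example configuration (illustrative only, via the iproute2 'tc' tool):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *		100ms mean delay, +/- 10ms jitter, 25% correlation
 *		between successive delay samples (see get_crandom()).
 *
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *		0.3% random loss, correlated with the previous decision.
 *
 * The option syntax is owned by userspace; the kernel only sees the
 * netlink attributes parsed in netem_change() below.
 */
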
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses sch->q and sch->limit */

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u32 rate;
	s32 packet_overhead;
	u32 cell_size;
	u32 cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

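/* Worked example (illustrative): the recurrence above computes
 *
 *	answer = value * (1 - r) + last * r,	r = rho / 2^32
 *
 * in 32.32 fixed point. With rho = 0x40000000 (~25% correlation),
 * each sample is weighted ~3/4 fresh entropy and ~1/4 previous
 * output, so successive values drift rather than jump.
 */
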
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Compare rnd with the transition probabilities outgoing
	 * from the current state, then decide the next state and
	 * whether the next packet will be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert).
 *
 * Compares a random number with the transition probabilities
 * outgoing from the current state, then decides the next state.
 * A second random number is extracted and compared with the loss
 * probability of the current state to decide whether the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
	}

	return false;
}

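/* Parameter sketch (illustrative): a1 = p is the good->bad transition
 * probability and a2 = r the bad->good one, both scaled to 2^32. A
 * small p with a larger r yields short loss bursts separated by long
 * loss-free gaps; the mean burst length grows as r shrinks.
 */
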
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI model):
		 * extract a value from the Markov 4-state loss generator;
		 * true means the packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * extract a value from the Gilbert-Elliot loss generator;
		 * true means the packet is dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE  +  (sigma / NETEM_DIST_SCALE) * t + mu;
}

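/* Worked example (illustrative): with mu = 100ms, sigma = 10ms and a
 * table entry t = NETEM_DIST_SCALE (i.e. one standard deviation), the
 * result is mu + sigma * t / NETEM_DIST_SCALE = 110ms. Splitting
 * sigma into its quotient and remainder by NETEM_DIST_SCALE, with
 * rounding by +/- NETEM_DIST_SCALE/2, avoids overflowing the
 * intermediate product while staying in integer arithmetic.
 */
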
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

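/* Worked example (illustrative): rate is in bytes per second, so a
 * 1500 byte packet at rate = 125000 (1 Mbit/s) yields
 *
 *	ticks = 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns = 12 ms
 *
 * of serialization delay on top of the configured latency/jitter.
 * cell_size models link-layer framing (e.g. ATM cells), rounding len
 * up to a whole number of cells before the division.
 */
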
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < sch->limit)) {
		skb = skb_peek_tail(list);
		/* Optimize for add at tail */
		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
			return qdisc_enqueue_tail(nskb, sch);

		skb_queue_reverse_walk(list, skb) {
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);
		sch->qstats.backlog += qdisc_pkt_len(nskb);
		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff_head *list = &sch->q;

			delay += packet_len_2_sched_time(skb->len, q);

			if (!skb_queue_empty(list)) {
				/*
				 * Last packet in queue is reference point (now).
				 * First packet in queue is already in flight;
				 * calculate this time bonus and subtract it
				 * from delay.
				 */
				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
			}
		}

		cb->time_to_send = now + delay;
		++q->counter;
		ret = tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.backlog += qdisc_pkt_len(skb);
		sch->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = qdisc_peek_head(sch);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);

		/* is it time to send this packet? */
		if (cb->time_to_send <= psched_get_time()) {
			skb = qdisc_dequeue_tail(sch);
			if (unlikely(!skb))
				goto qdisc_dequeue;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
deliver:
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

qdisc_dequeue:
	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

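/* The distribution table is normally generated offline; iproute2
 * ships sample tables (e.g. "normal", "pareto") built by its
 * maketable tool and loads them via TCA_NETEM_DELAY_DIST. Each entry
 * is a signed quantile sample scaled by NETEM_DIST_SCALE, consumed
 * by tabledist() above.
 */
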
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}

static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

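/* Layout of netem's TCA_OPTIONS payload (legacy format, illustrative):
 *
 *	+----------------------+-------------------------------------+
 *	| struct tc_netem_qopt | nested nlattrs (CORR, REORDER, ...) |
 *	+----------------------+-------------------------------------+
 *
 * parse_attr() above skips the fixed-size qopt header and parses any
 * attributes that follow it.
 */
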
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(sch, tb[TCA_NETEM_RATE]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rate.rate = q->rate;
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");