/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 The simulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
*/
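
/*
 * Illustrative only: netem is normally configured from user space with
 * the tc utility shipped in iproute2 (the exact option syntax depends
 * on the iproute2 version installed). A typical session might look like:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *	tc qdisc del dev eth0 root
 */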

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct timer_list timer;

	u32 latency;
	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 jitter;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		unsigned long last;
		unsigned long rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
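/*
 * Reader's note (a sketch of the arithmetic below, not a spec):
 *
 *	next = ((2^32 - (rho + 1)) * U + (rho + 1) * last) >> 32
 *
 * where U is a fresh value from net_random(); rho == 0 gives fully
 * independent values, while larger rho pulls each value toward the
 * previous one.
 */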
static unsigned long get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
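/*
 * Reader's note (a sketch of the scaling below, assuming the table
 * entries t are expressed in units of NETEM_DIST_SCALE):
 *
 *	result ~= mu + t * sigma / NETEM_DIST_SCALE
 *
 * sigma is split into quotient and remainder of NETEM_DIST_SCALE so
 * that the remainder part can be rounded to the nearest unit without
 * losing integer precision.
 */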
static long tabledist(unsigned long mu, long sigma,
		      struct crndstate *state, const struct disttable *dist)
{
	long t, x;
	unsigned long rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

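		/* flip one random bit in a random byte of the linear data */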
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc* sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* has the packet's send time been reached? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				sch->qstats.drops++;

				/* after this, qlen accounting is no longer accurate */
				printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
				       q->qdisc->ops->id);

				sch->q.qlen--;
			}

			mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
			sch->flags |= TCQ_F_THROTTLED;
		}
	}

	return NULL;
}

static void netem_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;

	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer_sync(&q->timer);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
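/*
 * The table is normally generated in user space (iproute2 ships
 * pre-computed normal, pareto and paretonormal tables, among others)
 * and handed down in the TCA_NETEM_DELAY_DIST attribute; nothing here
 * depends on which distribution it encodes.
 */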
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions,
	 * if gap is set, need to assume 100% probability
	 */
	q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of the timestamps in their control blocks.
 */
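/*
 * Note on the insertion scan below: it walks from the tail of the
 * queue, so in the common case where the new packet carries the latest
 * time_to_send it is appended in O(1); only packets meant to jump
 * ahead of their peers walk further toward the head.
 */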
struct fifo_sched_data {
	u32 limit;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	init_timer(&q->timer);
	q->timer.function = netem_watchdog;
	q->timer.data = (unsigned long) sch;

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");