/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 The emulator is limited by the Linux timer resolution
	 and will create packet bursts on the HZ boundary (1ms).
*/
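
/*
 * Illustrative userspace usage (not part of this file): netem is
 * normally configured through iproute2's tc frontend. The option
 * names below match the tc-netem interface for this version of the
 * qdisc, but exact syntax may vary between iproute2 releases.
 *
 *	# 100ms delay with 10ms jitter, 25% correlated
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	# add 0.1% loss and 1% duplication on top of the delay
 *	tc qdisc change dev eth0 root netem delay 100ms loss 0.1% duplicate 1%
 */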

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
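	/*
	 * Blend the fresh sample with the previous output:
	 *   answer = ((2^32 - rho') * value + rho' * last) >> 32
	 * where rho' = rho + 1. For example, rho = 0xC0000000
	 * (0.75 scaled to 32 bits) keeps roughly 3/4 of the previous
	 * value and mixes in 1/4 new randomness.
	 */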
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

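	/*
	 * Combined with the rounded remainder in x, the return value
	 * below works out to mu + (t * sigma) / NETEM_DIST_SCALE;
	 * splitting sigma into quotient and remainder keeps the
	 * intermediate products small.
	 */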
	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

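	/*
	 * If both duplication and loss fired, count is back at 1 and
	 * the packet passes through unchanged; 0 means the only copy
	 * was lost, 2 takes the duplication path below.
	 */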
	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

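		/* Flip a single random bit in a random byte of the
		 * linear (headlen) part of the packet data.
		 */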
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the
		 * front of the queue: once q->gap in-order packets have
		 * passed, the next one is sent with no extra delay
		 * (subject to the reorder probability).
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets, but don't change the time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc* sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

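	/* A set TCQ_F_THROTTLED flag means the watchdog timer is
	 * pending and nothing is due yet, so don't dequeue; the
	 * barrier orders this read against the watchdog's update.
	 */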
	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now = psched_get_time();

		/* is it time to send this packet? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

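	/* Swap the new table in under the device queue lock so the
	 * enqueue path never sees a half-written table; the old table
	 * (possibly NULL) is freed outside the lock.
	 */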
	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reordering probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on the timestamps in their skbs.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

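		/* Otherwise walk backwards from the tail until a packet
		 * with an earlier-or-equal send time is found, keeping
		 * the queue sorted by time_to_send.
		 */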
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");