/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		     Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet.  It can delay
	packets and add random jitter (and correlation).  The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves.  Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines on top of it.  It does not need to
	do bandwidth control either, since that can be handled by a
	token bucket or other rate control discipline.
*/
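
/*
 * Example usage (illustrative, not part of this file): netem is
 * normally configured with the iproute2 "tc" front end, e.g. assuming
 * a device named eth0:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *
 * The first command delays packets by 100ms +/- 10ms of jitter, each
 * delay 25% correlated with the previous one; the second reconfigures
 * the qdisc to instead drop 0.3% of packets, each loss decision 25%
 * correlated with the last.
 */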

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
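
/*
 * Sketch of the recurrence above (comment added for illustration):
 * with U a fresh uniform 32-bit sample and rho' = rho + 1,
 *
 *	answer = ((2^32 - rho') * U + rho' * last) >> 32
 *
 * i.e. a fixed-point weighted average of new entropy and the previous
 * output, so consecutive values are correlated by roughly rho / 2^32.
 */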

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
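
/*
 * Worked example for tabledist() (added for illustration): the table
 * holds samples of the target distribution, normalized so that one
 * standard deviation equals NETEM_DIST_SCALE.  The return value is
 * therefore mu + t * sigma / NETEM_DIST_SCALE, computed with sigma
 * split into quotient and remainder (plus rounding) so that the
 * intermediate product sigma * t cannot overflow.
 */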

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;
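
	/*
	 * Note (added for illustration): the probabilities above are
	 * 32-bit fixed point, so 0 means never and ~0 (0xffffffff)
	 * means always.  A configured loss of 1%, for example, arrives
	 * here as roughly 0.01 * 2^32 ~= 42949673, and the packet is
	 * dropped whenever the correlated random draw falls at or
	 * below that value.
	 */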

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the packet.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}
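
	/*
	 * Reordering illustration (comment added for clarity) for the
	 * decision below: once q->gap packets in a row have taken the
	 * configured delay, the next packet is sent with zero delay
	 * with probability q->reorder, letting it overtake the delayed
	 * ones; otherwise it is delayed as well and the counter keeps
	 * growing until the probability does fire.
	 */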

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

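/*
 * Dequeue strategy (summary comment, not in the original file): the
 * inner tfifo keeps packets sorted by time_to_send, so we only ever
 * peek at its head.  If the head packet is due, dequeue and return
 * it; otherwise arm the qdisc watchdog to fire at the head's due
 * time and return NULL for now.
 */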
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* is it time to send this packet yet? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}
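
/*
 * Note (added for clarity): the table above is built in a plain
 * GFP_KERNEL allocation and only swapped into q->delay_dist under the
 * qdisc root lock, so the enqueue fast path never observes a
 * half-initialized table; the old table is freed inside the same
 * critical section.
 */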

static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
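
/*
 * Layout note (added for clarity): netem's TCA_OPTIONS payload is a
 * fixed struct tc_netem_qopt header optionally followed by nested
 * attributes, so the helper above skips the aligned header before
 * handing the remainder to nla_parse().  A message carrying only the
 * header yields an empty attribute table rather than an error.
 */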

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, we need to assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on the timestamps in the skbs.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
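
/*
 * Insertion-order note (added for clarity): most packets arrive with
 * the latest time_to_send seen so far, so tfifo_enqueue() first tries
 * a cheap tail append (tracked via q->oldest); only a packet whose
 * randomized delay makes it due earlier than an already-queued one
 * triggers the reverse walk from the tail to find its sorted slot.
 */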

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);

		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");