/*
 * net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		     Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet.  It can delay
	packets and add random jitter (and correlation).  The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves.  Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines.  It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.
*/

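/*
 * Illustrative use from user space (not part of this file): netem is
 * normally configured via iproute2's tc.  A minimal sketch, assuming a
 * device named eth0:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *
 * The trailing 25% is the delay correlation, handled by get_crandom()
 * below; loss, duplication, reordering, and corruption each follow the
 * same probability/correlation pattern.
 */
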
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

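/*
 * In ordinary notation, with r = rho scaled into [0,1) and U a fresh
 * uniform sample, the recurrence above computes
 *
 *	next = (1 - r) * U + r * last
 *
 * i.e. a first-order autoregressive blend of new entropy with the
 * previous output, done in 32.32 fixed point to avoid floating point.
 */
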
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

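/*
 * A worked example of the split above, assuming NETEM_DIST_SCALE is
 * 8192 as defined in linux/pkt_sched.h: with sigma = 10000 ticks and a
 * table entry t = -4096 (i.e. -0.5 sigma),
 *
 *	(10000 % 8192) * -4096 / 8192  +  (10000 / 8192) * -4096
 *	= -904 + -4096 = -5000
 *
 * so the result is mu - 5000, exactly -0.5 sigma, without ever forming
 * the overflow-prone full product sigma * t.
 */
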
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the payload.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

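/*
 * An illustrative configuration for the reordering branch above, a
 * sketch assuming a device named eth0:
 *
 *	tc qdisc add dev eth0 root netem delay 10ms reorder 25% gap 5
 *
 * After each reordered packet, the next q->gap (here 5) packets always
 * take the delayed path; each packet beyond the gap is sent immediately
 * with 25% probability, overtaking the delayed ones and arriving out
 * of order.
 */
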
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* has the packet's send time been reached? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

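/*
 * Note on the peek/watchdog pattern above: the head packet is only
 * peeked at, not removed.  If its time_to_send is still in the future,
 * the qdisc watchdog is armed for that instant and the qdisc is left
 * throttled; when the timer fires it reschedules the queue and the
 * dequeue is retried.
 */
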
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}

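/*
 * The table itself normally comes from user space: iproute2 ships
 * pre-computed distribution files (e.g. normal.dist, pareto.dist) and
 * loads one with, for example,
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms distribution normal
 *
 * Each s16 entry is an inverse-CDF sample in units of
 * sigma/NETEM_DIST_SCALE, consumed by tabledist() above.  (A sketch
 * assuming eth0; file names are as installed by iproute2.)
 */
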
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

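/*
 * Why the manual offset parse above: for historical reasons netem's
 * TCA_OPTIONS attribute carries a raw struct tc_netem_qopt first, with
 * any netlink attributes (correlation, reorder, corrupt, ...) packed
 * after it rather than inside a conventional nested attribute, so
 * parse_attr() skips NLA_ALIGN(len) bytes before handing the remainder
 * to nla_parse().
 */
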
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on timestamps in the skbs.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

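/*
 * tfifo_enqueue() keeps the queue sorted by time_to_send, i.e. an
 * insertion sort.  The walk runs in reverse because packets almost
 * always arrive with non-decreasing send times, so the common case is
 * an O(1) tail insert; only reordered packets pay for the backward
 * scan to find their slot.
 */
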
static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");