/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *		Jamal Hadi Salim - moved it to net/core and reshuffled
 *		names to make it usable in general net subsystem.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/*
   This code is NOT intended to be used for statistics collection,
   its purpose is to provide a base for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   E.g. I did not find a simple way to estimate the current peak rate
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but to run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts an apparent upper bound on the number of rated
   flows, has minimal overhead when the number of flows is small, but is
   enough to handle controlled load service and sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as a negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * The stored value for avbps is scaled by 2^5, so that the maximal
     rate is ~1Gbit; avpps is scaled by 2^10.

   * The minimal interval is HZ/4=250msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), the maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */

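/*
 * Worked example (added illustration, not original commentary): with
 * parm->interval = 0 the estimator lands in slot idx = 2, i.e. the
 * timer fires every (HZ/4) << 2 jiffies = 1 second, so A = 1 sec.
 * Choosing parm->ewma_log = 3 gives W = 2^-3 = 0.125 and therefore
 * T = A/(-ln(1-W)) = 1/0.1335 ~= 7.5 seconds; a sustained change in
 * rate needs a few multiples of T to be fully reflected in avrate.
 */
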
#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic	*bstats;
	struct gnet_stats_rate_est	*rate_est;
	spinlock_t		*stats_lock;
	int			ewma_log;
	u64			last_bytes;
	u32			last_packets;
	u32			avpps;
	u32			avbps;
	struct rcu_head		e_rcu;
	struct rb_node		node;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;

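/*
 * Scaling note (added commentary, derived from est_timer() below): the
 * timer for slot idx fires every 2^(idx-2) seconds, so
 *
 *	bytes/sec   << 5  = delta_bytes   * 2^(2-idx) * 2^5  = delta_bytes   << (7 - idx)
 *	packets/sec << 10 = delta_packets * 2^(2-idx) * 2^10 = delta_packets << (12 - idx)
 *
 * which is why est_timer() shifts the deltas by (7 - idx) and (12 - idx)
 * and then rounds the scaled averages back down with >>5 and >>10.
 */
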
static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		u64 nbytes;
		u32 npackets;
		u32 rate;

		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		nbytes = e->bstats->bytes;
		npackets = e->bstats->packets;
		rate = (nbytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = nbytes;
		e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
		e->rate_est->bps = (e->avbps+0xF)>>5;

		rate = (npackets - e->last_packets)<<(12 - idx);
		e->last_packets = npackets;
		e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
		e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}

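/*
 * Added note (derived from the code below, not original commentary): the
 * rb-tree is keyed only by the bstats pointer and duplicates are allowed,
 * which is why gen_kill_estimator() calls gen_find_node() in a loop until
 * no matching node remains.
 */
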
static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
				    const struct gnet_stats_rate_est *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE: Called under rtnl_mutex
 */
int gen_new_estimator(struct gnet_stats_basic *bstats,
		      struct gnet_stats_rate_est *rate_est,
		      spinlock_t *stats_lock,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = bstats->bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = bstats->packets;
	est->avpps = rate_est->pps<<10;

	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);

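/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller such as a qdisc typically wires its counters to an estimator
 * roughly like this, where "sch", "root_lock" and "tca[TCA_RATE]" stand
 * in for whatever the caller actually has at hand:
 *
 *	err = gen_new_estimator(&sch->bstats, &sch->rate_est,
 *				root_lock, tca[TCA_RATE]);
 *	if (err)
 *		return err;
 *	...
 *	gen_kill_estimator(&sch->bstats, &sch->rate_est);
 */
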
static void __gen_kill_estimator(struct rcu_head *head)
{
	struct gen_estimator *e = container_of(head,
					struct gen_estimator, e_rcu);
	kfree(e);
}

/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
 * NOTE: Called under rtnl_mutex
 */
void gen_kill_estimator(struct gnet_stats_basic *bstats,
			struct gnet_stats_rate_est *rate_est)
{
	struct gen_estimator *e;

	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		write_lock_bh(&est_lock);
		e->bstats = NULL;
		write_unlock_bh(&est_lock);

		list_del_rcu(&e->list);
		call_rcu(&e->e_rcu, __gen_kill_estimator);
	}
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic *bstats,
			  struct gnet_stats_rate_est *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic *bstats,
			  const struct gnet_stats_rate_est *rate_est)
{
	ASSERT_RTNL();

	return gen_find_node(bstats, rate_est) != NULL;
}
EXPORT_SYMBOL(gen_estimator_active);