blob: 2ab9bbb6faffb799560df98b093d4cbc1207d816 [file] [log] [blame]
/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */
14
15#include <linux/module.h>
16#include <linux/skbuff.h>
17#include <linux/inet_diag.h>
18#include <asm/div64.h>
19#include <net/tcp.h>
20
/* alpha (additive increase) is a fixed-point value scaled by 2^ALPHA_SHIFT */
#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
/* Largest RTT (usec) we accept; bounds rtt*ALPHA_MAX below U32_MAX */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

/* beta (multiplicative decrease) is a fixed-point value scaled by 2^BETA_SHIFT */
#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX

/* Below this cwnd the algorithm behaves like Reno (alpha/beta at base) */
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

/* Number of consecutive low-delay RTTs required before alpha may jump to max */
static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
/* TCP Illinois Parameters (per-socket state, stored in icsk_ca_priv) */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase (scaled by ALPHA_SCALE) */
	u32	beta;		/* Multiplicative decrease (scaled by BETA_SCALE) */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtts measurements below threshold */
};
55
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070056static void rtt_reset(struct sock *sk)
57{
58 struct tcp_sock *tp = tcp_sk(sk);
59 struct illinois *ca = inet_csk_ca(sk);
60
61 ca->end_seq = tp->snd_nxt;
62 ca->cnt_rtt = 0;
63 ca->sum_rtt = 0;
64
65 /* TODO: age max_rtt? */
66}
67
Stephen Hemmingerc4622382007-04-20 17:07:51 -070068static void tcp_illinois_init(struct sock *sk)
69{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070070 struct illinois *ca = inet_csk_ca(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -070071
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070072 ca->alpha = ALPHA_MAX;
73 ca->beta = BETA_BASE;
74 ca->base_rtt = 0x7fffffff;
75 ca->max_rtt = 0;
76
77 ca->acked = 0;
78 ca->rtt_low = 0;
79 ca->rtt_above = 0;
80
81 rtt_reset(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -070082}
83
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070084/* Measure RTT for each ack. */
Stephen Hemminger30cfd0b2007-07-25 23:49:34 -070085static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
Stephen Hemmingerc4622382007-04-20 17:07:51 -070086{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070087 struct illinois *ca = inet_csk_ca(sk);
Stephen Hemminger164891a2007-04-23 22:26:16 -070088
89 ca->acked = pkts_acked;
90
Stephen Hemminger30cfd0b2007-07-25 23:49:34 -070091 /* dup ack, no rtt sample */
92 if (rtt < 0)
Ilpo Järvinenb9ce2042007-06-15 15:08:43 -070093 return;
94
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070095 /* ignore bogus values, this prevents wraparound in alpha math */
96 if (rtt > RTT_MAX)
97 rtt = RTT_MAX;
98
99 /* keep track of minimum RTT seen so far */
100 if (ca->base_rtt > rtt)
101 ca->base_rtt = rtt;
102
103 /* and max */
104 if (ca->max_rtt < rtt)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700105 ca->max_rtt = rtt;
106
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700107 ++ca->cnt_rtt;
108 ca->sum_rtt += rtt;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700109}
110
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700111/* Maximum queuing delay */
112static inline u32 max_delay(const struct illinois *ca)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700113{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700114 return ca->max_rtt - ca->base_rtt;
115}
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700116
/* Average queuing delay: mean RTT over the last interval minus base RTT.
 * Callers must ensure cnt_rtt > 0 (update_params checks this), otherwise
 * do_div() would divide by zero.
 */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	/* do_div() divides t in place; 64-bit divide safe on 32-bit arches */
	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}
125
126/*
127 * Compute value of alpha used for additive increase.
128 * If small window then use 1.0, equivalent to Reno.
129 *
130 * For larger windows, adjust based on average delay.
131 * A. If average delay is at minimum (we are uncongested),
132 * then use large alpha (10.0) to increase faster.
133 * B. If average delay is at maximum (getting congested)
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700134 * then use small alpha (0.3)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700135 *
136 * The result is a convex window growth curve.
137 */
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700138static u32 alpha(struct illinois *ca, u32 da, u32 dm)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700139{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700140 u32 d1 = dm / 100; /* Low threshold */
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700141
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700142 if (da <= d1) {
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700143 /* If never got out of low delay zone, then use max */
144 if (!ca->rtt_above)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700145 return ALPHA_MAX;
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700146
147 /* Wait for 5 good RTT's before allowing alpha to go alpha max.
148 * This prevents one good RTT from causing sudden window increase.
149 */
150 if (++ca->rtt_low < theta)
151 return ca->alpha;
152
153 ca->rtt_low = 0;
154 ca->rtt_above = 0;
155 return ALPHA_MAX;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700156 }
157
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700158 ca->rtt_above = 1;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700159
160 /*
161 * Based on:
162 *
163 * (dm - d1) amin amax
164 * k1 = -------------------
165 * amax - amin
166 *
167 * (dm - d1) amin
168 * k2 = ---------------- - d1
169 * amax - amin
170 *
171 * k1
172 * alpha = ----------
173 * k2 + da
174 */
175
176 dm -= d1;
177 da -= d1;
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700178 return (dm * ALPHA_MAX) /
179 (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700180}
181
182/*
183 * Beta used for multiplicative decrease.
184 * For small window sizes returns same value as Reno (0.5)
185 *
186 * If delay is small (10% of max) then beta = 1/8
187 * If delay is up to 80% of max then beta = 1/2
188 * In between is a linear function
189 */
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700190static u32 beta(u32 da, u32 dm)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700191{
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700192 u32 d2, d3;
193
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700194 d2 = dm / 10;
195 if (da <= d2)
196 return BETA_MIN;
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700197
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700198 d3 = (8 * dm) / 10;
199 if (da >= d3 || d3 <= d2)
200 return BETA_MAX;
201
202 /*
203 * Based on:
204 *
205 * bmin d3 - bmax d2
206 * k3 = -------------------
207 * d3 - d2
208 *
209 * bmax - bmin
210 * k4 = -------------
211 * d3 - d2
212 *
213 * b = k3 + k4 da
214 */
215 return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
216 / (d3 - d2);
217}
218
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700219/* Update alpha and beta values once per RTT */
220static void update_params(struct sock *sk)
221{
222 struct tcp_sock *tp = tcp_sk(sk);
223 struct illinois *ca = inet_csk_ca(sk);
224
225 if (tp->snd_cwnd < win_thresh) {
226 ca->alpha = ALPHA_BASE;
227 ca->beta = BETA_BASE;
228 } else if (ca->cnt_rtt > 0) {
229 u32 dm = max_delay(ca);
230 u32 da = avg_delay(ca);
231
232 ca->alpha = alpha(ca, da, dm);
233 ca->beta = beta(da, dm);
234 }
235
236 rtt_reset(sk);
237}
238
239/*
240 * In case of loss, reset to default values
241 */
242static void tcp_illinois_state(struct sock *sk, u8 new_state)
243{
244 struct illinois *ca = inet_csk_ca(sk);
245
246 if (new_state == TCP_CA_Loss) {
247 ca->alpha = ALPHA_BASE;
248 ca->beta = BETA_BASE;
249 ca->rtt_low = 0;
250 ca->rtt_above = 0;
251 rtt_reset(sk);
252 }
253}
254
255/*
256 * Increase window in response to successful acknowledgment.
257 */
Eric Dumazet24901552014-05-02 21:18:05 -0700258static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700259{
260 struct tcp_sock *tp = tcp_sk(sk);
261 struct illinois *ca = inet_csk_ca(sk);
262
263 if (after(ack, ca->end_seq))
264 update_params(sk);
265
266 /* RFC2861 only increase cwnd if fully utilized */
Eric Dumazet24901552014-05-02 21:18:05 -0700267 if (!tcp_is_cwnd_limited(sk))
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700268 return;
269
270 /* In slow start */
Yuchung Cheng071d5082015-07-09 13:16:29 -0700271 if (tcp_in_slow_start(tp))
Yuchung Cheng9f9843a72013-10-31 11:07:31 -0700272 tcp_slow_start(tp, acked);
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700273
274 else {
275 u32 delta;
276
277 /* snd_cwnd_cnt is # of packets since last cwnd increment */
278 tp->snd_cwnd_cnt += ca->acked;
279 ca->acked = 1;
280
281 /* This is close approximation of:
282 * tp->snd_cwnd += alpha/tp->snd_cwnd
283 */
284 delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
285 if (delta >= tp->snd_cwnd) {
286 tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
stephen hemminger688d1942014-08-29 23:32:05 -0700287 (u32)tp->snd_cwnd_clamp);
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700288 tp->snd_cwnd_cnt = 0;
289 }
290 }
291}
292
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700293static u32 tcp_illinois_ssthresh(struct sock *sk)
294{
295 struct tcp_sock *tp = tcp_sk(sk);
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700296 struct illinois *ca = inet_csk_ca(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700297
298 /* Multiplicative decrease */
Stephen Hemmingera357dde2007-11-30 01:10:55 +1100299 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700300}
301
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700302/* Extract info for Tcp socket info provided via netlink. */
Eric Dumazet64f40ff2015-04-28 16:23:48 -0700303static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
304 union tcp_cc_info *info)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700305{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700306 const struct illinois *ca = inet_csk_ca(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700307
308 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
Eric Dumazet64f40ff2015-04-28 16:23:48 -0700309 info->vegas.tcpv_enabled = 1;
310 info->vegas.tcpv_rttcnt = ca->cnt_rtt;
311 info->vegas.tcpv_minrtt = ca->base_rtt;
312 info->vegas.tcpv_rtt = 0;
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700313
Eric Dumazet64f40ff2015-04-28 16:23:48 -0700314 if (info->vegas.tcpv_rttcnt > 0) {
Jesper Dangaard Brouer8f363b72012-10-31 02:45:32 +0000315 u64 t = ca->sum_rtt;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700316
Eric Dumazet64f40ff2015-04-28 16:23:48 -0700317 do_div(t, info->vegas.tcpv_rttcnt);
318 info->vegas.tcpv_rtt = t;
Jesper Dangaard Brouer8f363b72012-10-31 02:45:32 +0000319 }
Eric Dumazet64f40ff2015-04-28 16:23:48 -0700320 *attr = INET_DIAG_VEGASINFO;
321 return sizeof(struct tcpvegas_info);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700322 }
Eric Dumazet521f1cf2015-04-16 18:10:35 -0700323 return 0;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700324}
325
/* Congestion-control ops table registered with the TCP stack */
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};
337
static int __init tcp_illinois_register(void)
{
	/* per-socket state must fit in the space icsk_ca_priv reserves */
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}
343
static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}
348
module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");