blob: 8a520996f3d268b6a3a1407fce986c3cf781a871 [file] [log] [blame]
Stephen Hemmingerc4622382007-04-20 17:07:51 -07001/*
2 * TCP Illinois congestion control.
3 * Home page:
4 * http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
5 *
6 * The algorithm is described in:
7 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
8 * for High-Speed Networks"
Justin P. Mattock631dd1a2010-10-18 11:03:14 +02009 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
Stephen Hemmingerc4622382007-04-20 17:07:51 -070010 *
11 * Implemented from description in paper and ns-2 simulation.
12 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
13 */
14
15#include <linux/module.h>
16#include <linux/skbuff.h>
17#include <linux/inet_diag.h>
18#include <asm/div64.h>
19#include <net/tcp.h>
20
/* alpha (additive increase factor) in ALPHA_SCALE fixed point:
 * 7 fractional bits, so ALPHA_SCALE == 1.0
 */
#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define U32_MAX		((u32)~0U)
/* Largest RTT (usec) that keeps rtt * ALPHA_MAX inside a u32 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

/* beta (multiplicative decrease factor) in BETA_SCALE fixed point:
 * 6 fractional bits, so BETA_SCALE == 1.0
 */
#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX

/* Below this cwnd the algorithm falls back to Reno-style constants */
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

/* # of consecutive low-delay RTTs required before alpha may jump to max */
static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
Stephen Hemmingerc4622382007-04-20 17:07:51 -070042
/* TCP Illinois Parameters
 * Per-connection state; must fit in the inet_csk_ca() scratch area
 * (checked with BUILD_BUG_ON at module init).
 */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase (ALPHA_SCALE fixed point) */
	u32	beta;		/* Multiplicative decrease (BETA_SCALE fixed point) */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtts measurements below threshold */
};
56
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070057static void rtt_reset(struct sock *sk)
58{
59 struct tcp_sock *tp = tcp_sk(sk);
60 struct illinois *ca = inet_csk_ca(sk);
61
62 ca->end_seq = tp->snd_nxt;
63 ca->cnt_rtt = 0;
64 ca->sum_rtt = 0;
65
66 /* TODO: age max_rtt? */
67}
68
Stephen Hemmingerc4622382007-04-20 17:07:51 -070069static void tcp_illinois_init(struct sock *sk)
70{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070071 struct illinois *ca = inet_csk_ca(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -070072
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070073 ca->alpha = ALPHA_MAX;
74 ca->beta = BETA_BASE;
75 ca->base_rtt = 0x7fffffff;
76 ca->max_rtt = 0;
77
78 ca->acked = 0;
79 ca->rtt_low = 0;
80 ca->rtt_above = 0;
81
82 rtt_reset(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -070083}
84
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070085/* Measure RTT for each ack. */
Stephen Hemminger30cfd0b2007-07-25 23:49:34 -070086static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
Stephen Hemmingerc4622382007-04-20 17:07:51 -070087{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070088 struct illinois *ca = inet_csk_ca(sk);
Stephen Hemminger164891a2007-04-23 22:26:16 -070089
90 ca->acked = pkts_acked;
91
Stephen Hemminger30cfd0b2007-07-25 23:49:34 -070092 /* dup ack, no rtt sample */
93 if (rtt < 0)
Ilpo Järvinenb9ce2042007-06-15 15:08:43 -070094 return;
95
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -070096 /* ignore bogus values, this prevents wraparound in alpha math */
97 if (rtt > RTT_MAX)
98 rtt = RTT_MAX;
99
100 /* keep track of minimum RTT seen so far */
101 if (ca->base_rtt > rtt)
102 ca->base_rtt = rtt;
103
104 /* and max */
105 if (ca->max_rtt < rtt)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700106 ca->max_rtt = rtt;
107
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700108 ++ca->cnt_rtt;
109 ca->sum_rtt += rtt;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700110}
111
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700112/* Maximum queuing delay */
113static inline u32 max_delay(const struct illinois *ca)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700114{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700115 return ca->max_rtt - ca->base_rtt;
116}
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700117
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700118/* Average queuing delay */
119static inline u32 avg_delay(const struct illinois *ca)
120{
121 u64 t = ca->sum_rtt;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700122
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700123 do_div(t, ca->cnt_rtt);
124 return t - ca->base_rtt;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700125}
126
127/*
128 * Compute value of alpha used for additive increase.
129 * If small window then use 1.0, equivalent to Reno.
130 *
131 * For larger windows, adjust based on average delay.
132 * A. If average delay is at minimum (we are uncongested),
133 * then use large alpha (10.0) to increase faster.
134 * B. If average delay is at maximum (getting congested)
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700135 * then use small alpha (0.3)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700136 *
137 * The result is a convex window growth curve.
138 */
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700139static u32 alpha(struct illinois *ca, u32 da, u32 dm)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700140{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700141 u32 d1 = dm / 100; /* Low threshold */
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700142
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700143 if (da <= d1) {
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700144 /* If never got out of low delay zone, then use max */
145 if (!ca->rtt_above)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700146 return ALPHA_MAX;
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700147
148 /* Wait for 5 good RTT's before allowing alpha to go alpha max.
149 * This prevents one good RTT from causing sudden window increase.
150 */
151 if (++ca->rtt_low < theta)
152 return ca->alpha;
153
154 ca->rtt_low = 0;
155 ca->rtt_above = 0;
156 return ALPHA_MAX;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700157 }
158
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700159 ca->rtt_above = 1;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700160
161 /*
162 * Based on:
163 *
164 * (dm - d1) amin amax
165 * k1 = -------------------
166 * amax - amin
167 *
168 * (dm - d1) amin
169 * k2 = ---------------- - d1
170 * amax - amin
171 *
172 * k1
173 * alpha = ----------
174 * k2 + da
175 */
176
177 dm -= d1;
178 da -= d1;
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700179 return (dm * ALPHA_MAX) /
180 (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700181}
182
183/*
184 * Beta used for multiplicative decrease.
185 * For small window sizes returns same value as Reno (0.5)
186 *
187 * If delay is small (10% of max) then beta = 1/8
188 * If delay is up to 80% of max then beta = 1/2
189 * In between is a linear function
190 */
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700191static u32 beta(u32 da, u32 dm)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700192{
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700193 u32 d2, d3;
194
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700195 d2 = dm / 10;
196 if (da <= d2)
197 return BETA_MIN;
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700198
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700199 d3 = (8 * dm) / 10;
200 if (da >= d3 || d3 <= d2)
201 return BETA_MAX;
202
203 /*
204 * Based on:
205 *
206 * bmin d3 - bmax d2
207 * k3 = -------------------
208 * d3 - d2
209 *
210 * bmax - bmin
211 * k4 = -------------
212 * d3 - d2
213 *
214 * b = k3 + k4 da
215 */
216 return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
217 / (d3 - d2);
218}
219
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700220/* Update alpha and beta values once per RTT */
221static void update_params(struct sock *sk)
222{
223 struct tcp_sock *tp = tcp_sk(sk);
224 struct illinois *ca = inet_csk_ca(sk);
225
226 if (tp->snd_cwnd < win_thresh) {
227 ca->alpha = ALPHA_BASE;
228 ca->beta = BETA_BASE;
229 } else if (ca->cnt_rtt > 0) {
230 u32 dm = max_delay(ca);
231 u32 da = avg_delay(ca);
232
233 ca->alpha = alpha(ca, da, dm);
234 ca->beta = beta(da, dm);
235 }
236
237 rtt_reset(sk);
238}
239
240/*
241 * In case of loss, reset to default values
242 */
243static void tcp_illinois_state(struct sock *sk, u8 new_state)
244{
245 struct illinois *ca = inet_csk_ca(sk);
246
247 if (new_state == TCP_CA_Loss) {
248 ca->alpha = ALPHA_BASE;
249 ca->beta = BETA_BASE;
250 ca->rtt_low = 0;
251 ca->rtt_above = 0;
252 rtt_reset(sk);
253 }
254}
255
256/*
257 * Increase window in response to successful acknowledgment.
258 */
Yuchung Cheng9f9843a72013-10-31 11:07:31 -0700259static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
260 u32 in_flight)
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700261{
262 struct tcp_sock *tp = tcp_sk(sk);
263 struct illinois *ca = inet_csk_ca(sk);
264
265 if (after(ack, ca->end_seq))
266 update_params(sk);
267
268 /* RFC2861 only increase cwnd if fully utilized */
269 if (!tcp_is_cwnd_limited(sk, in_flight))
270 return;
271
272 /* In slow start */
273 if (tp->snd_cwnd <= tp->snd_ssthresh)
Yuchung Cheng9f9843a72013-10-31 11:07:31 -0700274 tcp_slow_start(tp, acked);
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700275
276 else {
277 u32 delta;
278
279 /* snd_cwnd_cnt is # of packets since last cwnd increment */
280 tp->snd_cwnd_cnt += ca->acked;
281 ca->acked = 1;
282
283 /* This is close approximation of:
284 * tp->snd_cwnd += alpha/tp->snd_cwnd
285 */
286 delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
287 if (delta >= tp->snd_cwnd) {
288 tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
289 (u32) tp->snd_cwnd_clamp);
290 tp->snd_cwnd_cnt = 0;
291 }
292 }
293}
294
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700295static u32 tcp_illinois_ssthresh(struct sock *sk)
296{
297 struct tcp_sock *tp = tcp_sk(sk);
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700298 struct illinois *ca = inet_csk_ca(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700299
300 /* Multiplicative decrease */
Stephen Hemmingera357dde2007-11-30 01:10:55 +1100301 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700302}
303
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700304
305/* Extract info for Tcp socket info provided via netlink. */
306static void tcp_illinois_info(struct sock *sk, u32 ext,
307 struct sk_buff *skb)
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700308{
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700309 const struct illinois *ca = inet_csk_ca(sk);
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700310
311 if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
312 struct tcpvegas_info info = {
313 .tcpv_enabled = 1,
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700314 .tcpv_rttcnt = ca->cnt_rtt,
315 .tcpv_minrtt = ca->base_rtt,
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700316 };
Stephen Hemminger65d1b4a2007-04-23 22:24:32 -0700317
Jesper Dangaard Brouer8f363b72012-10-31 02:45:32 +0000318 if (info.tcpv_rttcnt > 0) {
319 u64 t = ca->sum_rtt;
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700320
Jesper Dangaard Brouer8f363b72012-10-31 02:45:32 +0000321 do_div(t, info.tcpv_rttcnt);
322 info.tcpv_rtt = t;
323 }
Stephen Hemmingerc4622382007-04-20 17:07:51 -0700324 nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
325 }
326}
327
/* Congestion-control operations vector registered with the TCP stack.
 * TCP_CONG_RTT_STAMP requests per-ACK RTT samples for pkts_acked.
 */
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.flags		= TCP_CONG_RTT_STAMP,
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.min_cwnd	= tcp_reno_min_cwnd,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};
341
/* Module init: verify per-socket state fits in the icsk scratch area,
 * then register the algorithm under the name "illinois".
 */
static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}
347
/* Module exit: remove the algorithm from the congestion-control list */
static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}
352
/* Module entry/exit points and metadata */
module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");