/*
 * Sally Floyd's High Speed TCP (RFC 3649) congestion control
 *
 * See http://www.icir.org/floyd/hstcp.html
 *
 * John Heffner <jheffner@psc.edu>
 */
8
John Heffnera628d292005-06-23 12:24:58 -07009#include <linux/module.h>
10#include <net/tcp.h>
11
/* AIMD parameter table from RFC 3649 appendix B.
 *
 * Each row gives, for a window size up to .cwnd, the multiplicative-decrease
 * factor .md in fixed point scaled by <<8 (so 128 == 0.50, 24 ~= 0.09).
 * Rows are sorted by ascending cwnd; hstcp_cong_avoid() keeps an index into
 * this table and the row index + 1 doubles as the additive-increase a(w).
 */
static const struct hstcp_aimd_val {
	unsigned int cwnd;
	unsigned int md;
} hstcp_aimd_vals[] = {
	{     38,  128, /*  0.50 */ },
	{    118,  112, /*  0.44 */ },
	{    221,  104, /*  0.41 */ },
	{    347,   98, /*  0.38 */ },
	{    495,   93, /*  0.37 */ },
	{    663,   89, /*  0.35 */ },
	{    851,   86, /*  0.34 */ },
	{   1058,   83, /*  0.33 */ },
	{   1284,   81, /*  0.32 */ },
	{   1529,   78, /*  0.31 */ },
	{   1793,   76, /*  0.30 */ },
	{   2076,   74, /*  0.29 */ },
	{   2378,   72, /*  0.28 */ },
	{   2699,   71, /*  0.28 */ },
	{   3039,   69, /*  0.27 */ },
	{   3399,   68, /*  0.27 */ },
	{   3778,   66, /*  0.26 */ },
	{   4177,   65, /*  0.26 */ },
	{   4596,   64, /*  0.25 */ },
	{   5036,   62, /*  0.25 */ },
	{   5497,   61, /*  0.24 */ },
	{   5979,   60, /*  0.24 */ },
	{   6483,   59, /*  0.23 */ },
	{   7009,   58, /*  0.23 */ },
	{   7558,   57, /*  0.22 */ },
	{   8130,   56, /*  0.22 */ },
	{   8726,   55, /*  0.22 */ },
	{   9346,   54, /*  0.21 */ },
	{   9991,   53, /*  0.21 */ },
	{  10661,   52, /*  0.21 */ },
	{  11358,   52, /*  0.20 */ },
	{  12082,   51, /*  0.20 */ },
	{  12834,   50, /*  0.20 */ },
	{  13614,   49, /*  0.19 */ },
	{  14424,   48, /*  0.19 */ },
	{  15265,   48, /*  0.19 */ },
	{  16137,   47, /*  0.19 */ },
	{  17042,   46, /*  0.18 */ },
	{  17981,   45, /*  0.18 */ },
	{  18955,   45, /*  0.18 */ },
	{  19965,   44, /*  0.17 */ },
	{  21013,   43, /*  0.17 */ },
	{  22101,   43, /*  0.17 */ },
	{  23230,   42, /*  0.17 */ },
	{  24402,   41, /*  0.16 */ },
	{  25618,   41, /*  0.16 */ },
	{  26881,   40, /*  0.16 */ },
	{  28193,   39, /*  0.16 */ },
	{  29557,   39, /*  0.15 */ },
	{  30975,   38, /*  0.15 */ },
	{  32450,   38, /*  0.15 */ },
	{  33986,   37, /*  0.15 */ },
	{  35586,   36, /*  0.14 */ },
	{  37253,   36, /*  0.14 */ },
	{  38992,   35, /*  0.14 */ },
	{  40808,   35, /*  0.14 */ },
	{  42707,   34, /*  0.13 */ },
	{  44694,   33, /*  0.13 */ },
	{  46776,   33, /*  0.13 */ },
	{  48961,   32, /*  0.13 */ },
	{  51258,   32, /*  0.13 */ },
	{  53677,   31, /*  0.12 */ },
	{  56230,   30, /*  0.12 */ },
	{  58932,   30, /*  0.12 */ },
	{  61799,   29, /*  0.12 */ },
	{  64851,   28, /*  0.11 */ },
	{  68113,   28, /*  0.11 */ },
	{  71617,   27, /*  0.11 */ },
	{  75401,   26, /*  0.10 */ },
	{  79517,   26, /*  0.10 */ },
	{  84035,   25, /*  0.10 */ },
	{  89053,   24, /*  0.10 */ },
};

/* Number of rows in the AIMD table; ai is clamped to HSTCP_AIMD_MAX - 1. */
#define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals)
94
/* Per-socket HighSpeed TCP state, kept in the icsk congestion-control
 * private area (size checked against ICSK_CA_PRIV_SIZE at module init). */
struct hstcp {
	u32 ai;        /* current row index into hstcp_aimd_vals[] */
	u32 loss_cwnd; /* snd_cwnd recorded at the last loss, for cwnd undo */
};
99
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -0300100static void hstcp_init(struct sock *sk)
John Heffnera628d292005-06-23 12:24:58 -0700101{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -0300102 struct tcp_sock *tp = tcp_sk(sk);
103 struct hstcp *ca = inet_csk_ca(sk);
John Heffnera628d292005-06-23 12:24:58 -0700104
105 ca->ai = 0;
106
107 /* Ensure the MD arithmetic works. This is somewhat pedantic,
108 * since I don't think we will see a cwnd this large. :) */
109 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
110}
111
Eric Dumazet24901552014-05-02 21:18:05 -0700112static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
John Heffnera628d292005-06-23 12:24:58 -0700113{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -0300114 struct tcp_sock *tp = tcp_sk(sk);
115 struct hstcp *ca = inet_csk_ca(sk);
John Heffnera628d292005-06-23 12:24:58 -0700116
Eric Dumazet24901552014-05-02 21:18:05 -0700117 if (!tcp_is_cwnd_limited(sk))
John Heffnera628d292005-06-23 12:24:58 -0700118 return;
119
Yuchung Cheng071d5082015-07-09 13:16:29 -0700120 if (tcp_in_slow_start(tp))
Yuchung Cheng9f9843a72013-10-31 11:07:31 -0700121 tcp_slow_start(tp, acked);
Ilpo Järvinen03fba042007-05-03 13:28:35 -0700122 else {
Xiaoliang (David) Wei6150c222006-07-11 13:03:28 -0700123 /* Update AIMD parameters.
124 *
125 * We want to guarantee that:
126 * hstcp_aimd_vals[ca->ai-1].cwnd <
127 * snd_cwnd <=
128 * hstcp_aimd_vals[ca->ai].cwnd
129 */
John Heffnera628d292005-06-23 12:24:58 -0700130 if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
131 while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
Patrick McHardy4a1ff6e2006-03-12 20:34:53 -0800132 ca->ai < HSTCP_AIMD_MAX - 1)
John Heffnera628d292005-06-23 12:24:58 -0700133 ca->ai++;
Xiaoliang (David) Wei6150c222006-07-11 13:03:28 -0700134 } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
135 while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
John Heffnera628d292005-06-23 12:24:58 -0700136 ca->ai--;
137 }
138
139 /* Do additive increase */
140 if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
Stephen Hemmingerfb80a6e2006-06-02 17:51:08 -0700141 /* cwnd = cwnd + a(w) / cwnd */
142 tp->snd_cwnd_cnt += ca->ai + 1;
John Heffnera628d292005-06-23 12:24:58 -0700143 if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
John Heffnera628d292005-06-23 12:24:58 -0700144 tp->snd_cwnd_cnt -= tp->snd_cwnd;
John Heffner5528e562006-05-05 17:41:44 -0700145 tp->snd_cwnd++;
John Heffnera628d292005-06-23 12:24:58 -0700146 }
147 }
148 }
149}
150
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -0300151static u32 hstcp_ssthresh(struct sock *sk)
John Heffnera628d292005-06-23 12:24:58 -0700152{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -0300153 const struct tcp_sock *tp = tcp_sk(sk);
Florian Westphal85f7e752016-11-21 14:18:37 +0100154 struct hstcp *ca = inet_csk_ca(sk);
John Heffnera628d292005-06-23 12:24:58 -0700155
Florian Westphal85f7e752016-11-21 14:18:37 +0100156 ca->loss_cwnd = tp->snd_cwnd;
John Heffnera628d292005-06-23 12:24:58 -0700157 /* Do multiplicative decrease */
158 return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
159}
160
Florian Westphal85f7e752016-11-21 14:18:37 +0100161static u32 hstcp_cwnd_undo(struct sock *sk)
162{
163 const struct hstcp *ca = inet_csk_ca(sk);
164
165 return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
166}
John Heffnera628d292005-06-23 12:24:58 -0700167
/* Ops vector registered with the TCP congestion-control framework;
 * hooks not set here (e.g. pkts_acked) fall back to stack defaults. */
static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
	.init		= hstcp_init,
	.ssthresh	= hstcp_ssthresh,
	.undo_cwnd	= hstcp_cwnd_undo,
	.cong_avoid	= hstcp_cong_avoid,

	.owner		= THIS_MODULE,
	.name		= "highspeed"
};
177
static int __init hstcp_register(void)
{
	/* Compile-time check: per-socket state must fit in the icsk
	 * congestion-control private area. */
	BUILD_BUG_ON(sizeof(struct hstcp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_highspeed);
}
183
static void __exit hstcp_unregister(void)
{
	/* Remove "highspeed" from the list of selectable congestion controls. */
	tcp_unregister_congestion_control(&tcp_highspeed);
}
188
/* Module entry/exit points and metadata. */
module_init(hstcp_register);
module_exit(hstcp_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("High Speed TCP");