/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		printk(KERN_ERR "TCP %s does not implement required ops\n",
		       ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		printk(KERN_INFO "TCP %s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

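/*
 * Illustrative sketch, not part of the original file: how a separate
 * congestion control module might use the registration API above.
 * The "tcp_example"/"example_*" names are hypothetical; the ops filled
 * in below simply mirror the reno helpers defined later in this file.
 */
static u32 example_ssthresh(struct sock *sk)
{
	/* Halve the window on loss, but never go below two segments. */
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}

static void example_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
			       u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else if (tp->snd_cwnd < tp->snd_cwnd_clamp)
		tp->snd_cwnd++;	/* crude: one segment per ACK past ssthresh */
}

static struct tcp_congestion_ops tcp_example = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= example_ssthresh,
	.cong_avoid	= example_cong_avoid,
};

static int __init tcp_example_init(void)
{
	/* Fails with -EINVAL if ssthresh/cong_avoid are missing,
	 * or -EEXIST if the name is already registered.
	 */
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_exit(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_init);
module_exit(tcp_example_exit);
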
/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

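	/* Editorial note (assumption, not original commentary): anything
	 * other than the tcp_init_congestion_ops placeholder means a
	 * congestion control was already chosen for this socket, e.g.
	 * via the TCP_CONGESTION socket option, so it is left untouched.
	 */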
	if (icsk->icsk_ca_ops != &tcp_init_congestion_ops)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (try_module_get(ca->owner)) {
			icsk->icsk_ca_ops = ca;
			break;
		}

	}
	rcu_read_unlock();

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_KMOD
	if (!ca) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

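/*
 * Illustrative note, not original commentary: the usual way to change
 * the default is the net.ipv4.tcp_congestion_control sysctl, whose
 * proc handler ends up calling tcp_set_default_congestion_control(),
 * e.g. from a shell:
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=reno
 */
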
/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);
	if (ca == icsk->icsk_ca_ops)
		goto out;

	if (!ca)
		err = -ENOENT;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;
		if (icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
 out:
	rcu_read_unlock();
	return err;
}

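/*
 * Illustrative userspace usage, not part of this file: applications
 * reach tcp_set_congestion_control() through the TCP_CONGESTION
 * socket option, e.g.:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno", strlen("reno"));
 *
 * The -ENOENT/-EBUSY results above are returned to the caller.
 */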

/*
 * Linear increase during slow start
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	if (sysctl_tcp_abc) {
		/* RFC3465: Slow Start
		 * TCP sender SHOULD increase cwnd by the number of
		 * previously unacknowledged bytes ACKed by each incoming
		 * acknowledgment, provided the increase is not more than L
		 */
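		/* Worked example (editorial, not original commentary): with
		 * mss_cache = 1460, an ACK covering fewer than 1460 bytes only
		 * accumulates in bytes_acked and returns below; once a full
		 * MSS has been acked we fall through and grow cwnd by one
		 * segment, and with sysctl_tcp_abc > 1 an ACK covering at
		 * least 2920 bytes (a delayed ACK for two full segments)
		 * grows it by two.
		 */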
		if (tp->bytes_acked < tp->mss_cache)
			return;

		/* We MAY increase by 2 if a delayed ack was detected */
		if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	}
	tp->bytes_acked = 0;

	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
		tp->snd_cwnd++;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
			 int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
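		/* Editorial note: snd_cwnd_cnt counts ACKs since the last
		 * increase; once a full window's worth of ACKs has arrived,
		 * cwnd grows by one segment, i.e. roughly once per RTT.
		 */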
		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else
			tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN).
 * Really reno under another name so we can tell the difference
 * during tcp_set_default_congestion_control.
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);