/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
{
	const struct tcp_congestion_ops *ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
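
/* A minimal registration sketch (hypothetical module, not part of this
 * file): a congestion control module typically fills in a static
 * struct tcp_congestion_ops and registers it from module_init(), e.g.:
 *
 *	static struct tcp_congestion_ops tcp_foo __read_mostly = {
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= foo_ssthresh,
 *		.undo_cwnd	= foo_undo_cwnd,
 *		.cong_avoid	= foo_cong_avoid,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_foo);
 *	}
 *	module_init(foo_init);
 *
 * Unregistration from module_exit() goes through
 * tcp_unregister_congestion_control() below.
 */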

/*
 * Remove congestion control algorithm, called from
 * the module's remove function. Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now, as our module is
	 * in the "going" state: no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fall back to the next available algorithm. The last,
		 * really guaranteed fallback on this list is Reno.
		 */
	}
out:
	rcu_read_unlock();
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (sk->sk_state != TCP_CLOSE)
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
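
/* Note: at runtime the default is typically changed through the
 * net.ipv4.tcp_congestion_control sysctl, whose handler calls
 * tcp_set_default_congestion_control() above, e.g.:
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=reno
 */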

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = __tcp_ca_find_autoload(name);
	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}
	if (!ca) {
		err = -ENOENT;
	} else if (!load) {
		const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;

		if (try_module_get(ca->owner)) {
			if (reinit) {
				tcp_reinit_congestion_control(sk, ca);
			} else {
				icsk->icsk_ca_ops = ca;
				module_put(old_ca->owner);
			}
		} else {
			err = -EBUSY;
		}
	} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		     ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
		err = -EPERM;
	} else if (!try_module_get(ca->owner)) {
		err = -EBUSY;
	} else {
		tcp_reinit_congestion_control(sk, ca);
	}
out:
	rcu_read_unlock();
	return err;
}

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base this on RFC 2581 and also handle stretch ACKs properly.
 * We do not implement RFC 3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC. Slow start processes a
 * stretch ACK of degree N as if N acks of degree 1 are received back to back,
 * except ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
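 *
 * For example (illustrative numbers only): with snd_cwnd = 8,
 * snd_ssthresh = 16 and acked = 10, cwnd is clamped to 16 below and the
 * remaining 2 acks are returned to the caller for the congestion
 * avoidance step.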
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
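 *
 * For example (illustrative numbers only): with w = 10 and acked = 3 on
 * each call, snd_cwnd_cnt reaches 12 on the fourth call, so snd_cwnd is
 * increased by one and the remaining 2 credits carry over to later calls.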
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is a special case used as the fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

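/* Undo a cwnd reduction. Since tcp_reno_ssthresh() halves the window on
 * loss, ssthresh << 1 approximates the pre-reduction cwnd; never return
 * less than the current window.
 */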
u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};