John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 1 | /* Tom Kelly's Scalable TCP |
| 2 | * |
Joe Perches | a52b8bd | 2009-02-24 16:40:16 -0800 | [diff] [blame] | 3 | * See http://www.deneholme.net/tom/scalable/ |
John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 4 | * |
| 5 | * John Heffner <jheffner@sc.edu> |
| 6 | */ |
| 7 | |
John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 8 | #include <linux/module.h> |
| 9 | #include <net/tcp.h> |
| 10 | |
| 11 | /* These factors derived from the recommended values in the paper: |
| 12 | * .01 and 7/8. We use 50 instead of 100 to account for |
| 13 | * delayed ack. |
| 14 | */ |
| 15 | #define TCP_SCALABLE_AI_CNT 50U |
| 16 | #define TCP_SCALABLE_MD_SCALE 3 |
| 17 | |
Eric Dumazet | 2490155 | 2014-05-02 21:18:05 -0700 | [diff] [blame] | 18 | static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) |
John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 19 | { |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 20 | struct tcp_sock *tp = tcp_sk(sk); |
Stephen Hemminger | f4805ed | 2005-11-10 16:53:30 -0800 | [diff] [blame] | 21 | |
Eric Dumazet | 2490155 | 2014-05-02 21:18:05 -0700 | [diff] [blame] | 22 | if (!tcp_is_cwnd_limited(sk)) |
John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 23 | return; |
| 24 | |
Yuchung Cheng | 071d508 | 2015-07-09 13:16:29 -0700 | [diff] [blame] | 25 | if (tcp_in_slow_start(tp)) |
Yuchung Cheng | 9f9843a7 | 2013-10-31 11:07:31 -0700 | [diff] [blame] | 26 | tcp_slow_start(tp, acked); |
Ilpo Järvinen | 758ce5c | 2009-02-28 04:44:37 +0000 | [diff] [blame] | 27 | else |
Neal Cardwell | e73ebb08 | 2015-01-28 20:01:35 -0500 | [diff] [blame] | 28 | tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), |
| 29 | 1); |
John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 30 | } |
| 31 | |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 32 | static u32 tcp_scalable_ssthresh(struct sock *sk) |
John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 33 | { |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 34 | const struct tcp_sock *tp = tcp_sk(sk); |
stephen hemminger | 688d194 | 2014-08-29 23:32:05 -0700 | [diff] [blame] | 35 | |
John Heffner | 0e57976 | 2005-06-23 12:29:07 -0700 | [diff] [blame] | 36 | return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); |
| 37 | } |
| 38 | |
/* Ops vector registered with the TCP congestion control framework.
 * Only ssthresh and cong_avoid are overridden here; hooks left unset
 * fall back to the stack's defaults.  NOTE(review): default-hook
 * behavior assumed from the registration API -- confirm in tcp_cong.c.
 */
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh	= tcp_scalable_ssthresh,
	.cong_avoid	= tcp_scalable_cong_avoid,

	.owner		= THIS_MODULE,
	.name		= "scalable",	/* selectable via tcp_congestion_control */
};
| 46 | |
| 47 | static int __init tcp_scalable_register(void) |
| 48 | { |
| 49 | return tcp_register_congestion_control(&tcp_scalable); |
| 50 | } |
| 51 | |
/* Module exit: remove the algorithm from the available congestion
 * controls.  NOTE(review): teardown of sockets still using it is
 * presumed to be handled by the core unregister path -- confirm.
 */
static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}
| 56 | |
/* Wire the init/exit handlers into the module load/unload path. */
module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

/* Module metadata. */
MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");