/* Tom Kelly's Scalable TCP
 *
 * See http://www.deneholme.net/tom/scalable/
 *
 * John Heffner <jheffner@psc.edu>
 */

#include <linux/module.h>
#include <net/tcp.h>

/* These factors derived from the recommended values in the paper:
 * .01 and 7/8. We use 50 instead of 100 to account for
 * delayed ack.
 */
#define TCP_SCALABLE_AI_CNT     50U
#define TCP_SCALABLE_MD_SCALE   3

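/* Additive increase: outside slow start, cwnd grows by one segment per
 * min(cwnd, TCP_SCALABLE_AI_CNT) ACKs.  Once cwnd exceeds 50 that is
 * 1/50th of a segment per ACK, which matches the paper's 0.01 when a
 * delayed ACK covers two segments; below 50 it behaves like Reno.
 */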
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                                    u32 in_flight)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tcp_is_cwnd_limited(sk, in_flight))
                return;

        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp, acked);
        else
                tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
}

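/* Multiplicative decrease: back off to 7/8 of the current window
 * (cwnd - (cwnd >> 3)), never below two segments.  For example, with
 * cwnd = 100 this returns 100 - 12 = 88.
 */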
static u32 tcp_scalable_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        return max(tp->snd_cwnd - (tp->snd_cwnd >> TCP_SCALABLE_MD_SCALE), 2U);
}

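/* Only the growth (.cong_avoid) and loss-response (.ssthresh) hooks differ
 * from Reno; .min_cwnd reuses the Reno helper and all other callbacks are
 * left unset so the stack's defaults apply.
 */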
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
        .ssthresh       = tcp_scalable_ssthresh,
        .cong_avoid     = tcp_scalable_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,

        .owner          = THIS_MODULE,
        .name           = "scalable",
};

static int __init tcp_scalable_register(void)
{
        return tcp_register_congestion_control(&tcp_scalable);
}

static void __exit tcp_scalable_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_scalable);
}

module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
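
/*
 * Usage sketch (not part of this module; assumes it is built and loaded):
 * "scalable" can be selected system-wide with
 *
 *      sysctl -w net.ipv4.tcp_congestion_control=scalable
 *
 * or per socket from userspace via the standard TCP_CONGESTION option:
 *
 *      #include <string.h>
 *      #include <sys/socket.h>
 *      #include <netinet/in.h>
 *      #include <netinet/tcp.h>
 *
 *      int fd = socket(AF_INET, SOCK_STREAM, 0);
 *      setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *                 "scalable", strlen("scalable"));
 */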