blob: f2123075ce6e1be4753e26bb1db81423e272caef [file] [log] [blame]
/* Tom Kelly's Scalable TCP
 *
 * See http://www.deneholme.net/tom/scalable/
 *
 * John Heffner <jheffner@sc.edu>
 */
7
John Heffner0e579762005-06-23 12:29:07 -07008#include <linux/module.h>
9#include <net/tcp.h>
10
/* These factors derived from the recommended values in the paper:
 * 0.01 and 7/8. We use 50 instead of 100 to account for
 * delayed ack.
 */
15#define TCP_SCALABLE_AI_CNT 50U
16#define TCP_SCALABLE_MD_SCALE 3
17
/* Per-connection private state, stored in the socket's inet_csk_ca()
 * scratch area by the congestion-control framework.
 */
struct scalable {
	u32 loss_cwnd;	/* snd_cwnd snapshot taken in ssthresh(), used by undo */
};
21
Eric Dumazet24901552014-05-02 21:18:05 -070022static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
John Heffner0e579762005-06-23 12:29:07 -070023{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -030024 struct tcp_sock *tp = tcp_sk(sk);
Stephen Hemmingerf4805ed2005-11-10 16:53:30 -080025
Eric Dumazet24901552014-05-02 21:18:05 -070026 if (!tcp_is_cwnd_limited(sk))
John Heffner0e579762005-06-23 12:29:07 -070027 return;
28
Yuchung Cheng071d5082015-07-09 13:16:29 -070029 if (tcp_in_slow_start(tp))
Yuchung Cheng9f9843a72013-10-31 11:07:31 -070030 tcp_slow_start(tp, acked);
Ilpo Järvinen758ce5c2009-02-28 04:44:37 +000031 else
Neal Cardwelle73ebb082015-01-28 20:01:35 -050032 tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
33 1);
John Heffner0e579762005-06-23 12:29:07 -070034}
35
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -030036static u32 tcp_scalable_ssthresh(struct sock *sk)
John Heffner0e579762005-06-23 12:29:07 -070037{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -030038 const struct tcp_sock *tp = tcp_sk(sk);
Florian Westphal85f7e752016-11-21 14:18:37 +010039 struct scalable *ca = inet_csk_ca(sk);
40
41 ca->loss_cwnd = tp->snd_cwnd;
stephen hemminger688d1942014-08-29 23:32:05 -070042
John Heffner0e579762005-06-23 12:29:07 -070043 return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
44}
45
Florian Westphal85f7e752016-11-21 14:18:37 +010046static u32 tcp_scalable_cwnd_undo(struct sock *sk)
47{
48 const struct scalable *ca = inet_csk_ca(sk);
49
50 return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
51}
52
/* Hook table registering Scalable TCP with the kernel's pluggable
 * congestion-control framework; selectable via sysctl or socket
 * option as "scalable".
 */
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh	= tcp_scalable_ssthresh,	/* cwnd target after loss */
	.undo_cwnd	= tcp_scalable_cwnd_undo,	/* revert spurious reduction */
	.cong_avoid	= tcp_scalable_cong_avoid,	/* per-ACK cwnd growth */

	.owner		= THIS_MODULE,
	.name		= "scalable",
};
61
/* Module init: make "scalable" available to the CC framework.
 * Returns 0 on success or a negative errno from registration.
 */
static int __init tcp_scalable_register(void)
{
	return tcp_register_congestion_control(&tcp_scalable);
}
66
/* Module exit: withdraw the algorithm from the CC framework. */
static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}
71
/* Standard kernel module plumbing. */
module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");