/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 * Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
 *
 * - A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *   A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in the
 * original Reno.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;	/* first bandwidth estimation (only lightly smoothed) */
	u32    bw_est;		/* smoothed bandwidth estimate */
	u32    rtt_win_sx;	/* start of the current RTT evaluation window */
	u32    bk;		/* bytes acked within the current window */
	u32    snd_una;		/* used for evaluating the number of acked bytes */
	u32    cumul_ack;	/* bytes acked by the current ACK */
	u32    accounted;	/* dupack bytes already credited to bk */
	u32    rtt;		/* latest RTT sample */
	u32    rtt_min;		/* minimum observed RTT */
	u8     first_ack;	/* flag marking the first ACK of the connection */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN	(HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT	(20*HZ)	/* deliberately over-conservative */

/*
 * @tcp_westwood_init
 * This function initializes the fields used by TCP Westwood+. It is
 * called after the initial SYN, so the sequence numbers are correct,
 * but for new passive connections we have no information about RTTmin
 * at this time, so we simply set it to TCP_WESTWOOD_INIT_RTT. This
 * value was deliberately chosen to be overly conservative, since that
 * way we are sure it will be updated in a consistent way as soon as
 * possible. That will reasonably happen within the first RTT period of
 * the connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
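/*
 * Note: the filter computes new = (7 * old + sample) / 8, i.e. an
 * exponentially weighted moving average with gain 1/8. For example,
 * westwood_do_filter(800, 1600) returns (7 * 800 + 1600) >> 3 == 900,
 * moving the estimate one eighth of the way towards the new sample.
 */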
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return (((7 * a) + b) >> 3);
}

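/*
 * @westwood_filter
 * Feeds the latest bandwidth sample, w->bk / delta (bytes acked per
 * jiffy over the window that just closed), through two cascaded stages
 * of the low-pass filter: first into the lightly smoothed bw_ns_est,
 * then into the final estimate bw_est.
 */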
static inline void westwood_filter(struct westwood *w, u32 delta)
{
	w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
	w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
}

/*
 * @tcp_westwood_pkts_acked
 * Called after processing a group of acked packets, but all Westwood
 * needs from it is the most recent sample of srtt.
 */
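/*
 * Note: tp->srtt is kept left-shifted by 3 bits (scaled by 8) by the
 * RTT estimator, so the ">> 3" below recovers the smoothed RTT in
 * jiffies.
 */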
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (cnt > 0)
		w->rtt = tcp_sk(sk)->srtt >> 3;
}

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if the right moment to do so has
 * arrived; in that case it calls the filter to evaluate the bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample.
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT-window has passed.
	 * Be careful: if RTT is less than 50ms we don't filter but keep
	 * 'building the sample', since estimating the bandwidth over very
	 * small time intervals is better avoided.
	 * On a LAN we will therefore reasonably always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN.
	 */
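	/*
	 * Worked example (assuming HZ == 1000, i.e. one jiffy == 1ms):
	 * with w->rtt == 200 jiffies the window closes once delta exceeds
	 * 200 jiffies; if w->bk == 300000 bytes were acked over delta ==
	 * 250 jiffies, the raw sample passed to the filter is
	 * 300000 / 250 == 1200 bytes/jiffy.
	 */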
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}

/*
 * @westwood_fast_bw
 * Called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	w->rtt_min = min(w->rtt, w->rtt_min);
}

/*
 * @westwood_acked_count
 * This function computes cumul_ack, the number of newly acked bytes to
 * be credited to bk, handling delayed, partial and duplicate ACKs.
 */
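/*
 * Worked example (assuming mss_cache == 1000): three dupacks arrive,
 * each credited below as one MSS, so w->accounted grows to 3000. When
 * a cumulative ACK finally advances snd_una by 5000 bytes, the 3000
 * bytes already credited are subtracted and only 2000 is returned, so
 * 3*1000 + 2000 == 5000 bytes are counted in total and none twice.
 */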
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack, since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}

/*
 * TCP Westwood
 * Here the limit is evaluated as bw_est * RTTmin (divided by mss_cache
 * to obtain it in packets). The result is clamped to a minimum of 2,
 * so this never returns 0.
 */
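/*
 * Worked example (assuming HZ == 1000): with bw_est == 1500 bytes/jiffy
 * (about 12 Mbit/s), rtt_min == 100 jiffies (100ms) and a 1500-byte
 * mss_cache, the window is set to 1500 * 100 / 1500 == 100 packets,
 * i.e. the estimated bandwidth-delay product of the path.
 */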
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}

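/*
 * This is where Westwood+ departs from Reno after loss: when recovery
 * completes (CA_EVENT_COMPLETE_CWR), cwnd and ssthresh are reset to the
 * estimated bandwidth-delay product rather than left at half the old
 * window, and an F-RTO event sets ssthresh the same way. Fast and slow
 * ACKs feed the bandwidth sampler above.
 */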
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_FRTO:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		w->rtt_min = min(w->rtt, w->rtt_min);
		break;

	default:
		/* don't care */
		break;
	}
}

/* Extract info for TCP socket info provided via netlink; Westwood
 * reports its RTT data through the tcpvegas_info structure, in the
 * INET_DIAG_VEGASINFO attribute.
 */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct rtattr *rta;
		struct tcpvegas_info *info;

		rta = __RTA_PUT(skb, INET_DIAG_VEGASINFO, sizeof(*info));
		info = RTA_DATA(rta);
		info->tcpv_enabled = 1;
		info->tcpv_rttcnt = 0;
		info->tcpv_rtt = jiffies_to_usecs(ca->rtt);
		info->tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
	rtattr_failure: ;
	}
}

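/*
 * Note how the probing phase is delegated to Reno (tcp_reno_ssthresh,
 * tcp_reno_cong_avoid), as stated in the header comment: only the
 * post-loss window setting and the bandwidth-sampling hooks are
 * Westwood-specific.
 */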
static struct tcp_congestion_ops tcp_westwood = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};

static int __init tcp_westwood_register(void)
{
	/* The socket's private congestion-control area must fit struct westwood */
	BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");