/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 * Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
 *
 * - A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo.
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *    A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is as in the original Reno.
 */
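
/*
 * In outline, the control law implemented below (see
 * tcp_westwood_bw_rttmin()): after a congestion episode, rather than
 * blindly halving the window, ssthresh (and, once CWR completes, also
 * cwnd) is set to
 *
 *	(bw_est * rtt_min) / mss	packets
 *
 * where bw_est is the filtered bandwidth estimate and rtt_min the
 * minimum RTT observed, i.e. the window that just fills the
 * estimated pipe.
 */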

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
	u32    bw_est;           /* bandwidth estimate */
	u32    rtt_win_sx;       /* here starts a new evaluation... */
	u32    bk;               /* bytes acked during the current RTT window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes acked by the latest ACK */
	u32    accounted;        /* bytes already credited on dupacks */
	u32    rtt;              /* latest RTT sample (jiffies) */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag which infers that this is the first ack */
	u8     reset_rtt_min;    /* Reset RTT min to next RTT sample */
};


/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+. It is called
 * after the initial SYN, so the sequence numbers are correct, but for new
 * passive connections we have no information about RTTmin at this time,
 * so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was deliberately
 * chosen to be overly conservative, since that way we can be sure it will
 * be updated in a consistent way as soon as possible: reasonably within
 * the first RTT period of the connection lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return ((7 * a) + b) >> 3;
}
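
/*
 * An illustrative run of the filter above, with made-up numbers: with a
 * previous estimate a = 800 and a new sample b = 1600,
 *
 *	((7 * 800) + 1600) >> 3 = (5600 + 1600) / 8 = 900
 *
 * i.e. an exponentially weighted moving average with gain 1/8: each new
 * sample moves the estimate only 1/8 of the way toward it.
 */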

static void westwood_filter(struct westwood *w, u32 delta)
{
	/* If the filter is empty fill it with the first sample of bandwidth */
	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		w->bw_ns_est = w->bk / delta;
		w->bw_est = w->bw_ns_est;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}
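
/*
 * Note on units: w->bk counts bytes acked during the current window and
 * delta is measured in jiffies, so the raw sample w->bk / delta and both
 * estimates are in bytes per jiffy. No explicit conversion is needed:
 * tcp_westwood_bw_rttmin() below multiplies by rtt_min (also in jiffies)
 * before dividing by the MSS, and the jiffies cancel out.
 */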

/*
 * @tcp_westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs
 * is the last RTT sample.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (rtt > 0)
		w->rtt = usecs_to_jiffies(rtt);
}

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if it is the right moment to do so,
 * and in that case calls the filter to compute a new bandwidth sample.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT window has passed. Be careful: if the RTT is less
	 * than 50ms we don't filter but keep 'building the sample', since
	 * estimates over very small time intervals are better avoided.
	 * On a LAN we will therefore reasonably always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN.
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}
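
/*
 * A worked example with made-up numbers, assuming HZ = 1000 (1 jiffy =
 * 1ms): with w->rtt = 40 jiffies, the threshold above is
 * max(40, HZ/20) = 50 jiffies. ACKs arriving less than 50ms after
 * rtt_win_sx only grow w->bk; the first ACK beyond that turns the
 * accumulated bytes into one bandwidth sample and opens a new window.
 */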

static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else
		w->rtt_min = min(w->rtt, w->rtt_min);
}
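
/*
 * The reset path above matters after packet loss (see CA_EVENT_LOSS
 * below): instead of keeping a possibly stale minimum, the next RTT
 * sample re-seeds rtt_min and the min() tracking restarts from there.
 */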

/*
 * @westwood_fast_bw
 * It is called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * Computes cumul_ack, used to update bk, handling the cases of delayed
 * and partial acks.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}
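
/*
 * A worked example of the dupack accounting above, with made-up numbers
 * and mss_cache = 1000 bytes: a dupack doesn't move snd_una, so
 * cumul_ack is 0; we credit one MSS to the bandwidth count anyway
 * (cumul_ack = 1000) and remember it (accounted = 1000). When the
 * cumulative ACK finally moves snd_una by, say, 3000 bytes, the 1000
 * already credited is subtracted (cumul_ack = 2000, accounted = 0),
 * so no byte is counted twice.
 */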

/*
 * TCP Westwood
 * Here the limit is evaluated as bandwidth estimate * RTTmin (we use
 * mss_cache to obtain it in packets). The result is clamped to be at
 * least 2, so we never return 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
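
/*
 * Plugging in made-up numbers, again with HZ = 1000: bw_est = 1460
 * bytes/jiffy (about 11.7 Mbit/s), rtt_min = 100 jiffies (100ms) and
 * mss_cache = 1460 bytes give
 *
 *	(1460 * 100) / 1460 = 100 packets
 *
 * i.e. a window matching the estimated bandwidth-delay product.
 */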

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;

	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;

	case CA_EVENT_LOSS:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when next ack arrives */
		w->reset_rtt_min = 1;
		break;

	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		update_rtt_min(w);
		break;

	default:
		/* don't care */
		break;
	}
}


/* Extract Westwood info for the TCP socket; it is exported to userspace
 * via netlink (inet_diag).
 */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rtt = jiffies_to_usecs(ca->rtt),
			.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
		};

		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}


static struct tcp_congestion_ops tcp_westwood __read_mostly = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_westwood_bw_rttmin,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};
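
/*
 * Once the module is loaded, "westwood" can be selected like any other
 * congestion control, either system-wide
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=westwood
 *
 * or per socket; a minimal sketch from userspace (not part of this
 * module):
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *		   "westwood", strlen("westwood"));
 */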

static int __init tcp_westwood_register(void)
{
	BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");