Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 1 | /* |
| 2 | * TCP Illinois congestion control. |
| 3 | * Home page: |
| 4 | * http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html |
| 5 | * |
| 6 | * The algorithm is described in: |
| 7 | * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm |
| 8 | * for High-Speed Networks" |
Justin P. Mattock | 631dd1a | 2010-10-18 11:03:14 +0200 | [diff] [blame] | 9 | * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 10 | * |
| 11 | * Implemented from description in paper and ns-2 simulation. |
| 12 | * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> |
| 13 | */ |
| 14 | |
| 15 | #include <linux/module.h> |
| 16 | #include <linux/skbuff.h> |
| 17 | #include <linux/inet_diag.h> |
| 18 | #include <asm/div64.h> |
| 19 | #include <net/tcp.h> |
| 20 | |
/* Fixed-point representation:
 * alpha (additive increase) is scaled by ALPHA_SCALE (1.0 == 1<<7),
 * beta (multiplicative decrease) is scaled by BETA_SCALE (1.0 == 1<<6).
 */
#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
/* largest rtt (usec) that cannot overflow u32 when multiplied by ALPHA_MAX */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)	/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)	/* 0.5 */
#define BETA_BASE	BETA_MAX

/* below this cwnd the algorithm falls back to Reno-style constants */
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

/* # of consecutive low-delay RTTs required before alpha may jump to max */
static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
/* TCP Illinois Parameters — per-connection state, stored in the
 * inet_csk_ca() private area (size checked at module init).
 */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase (scaled by ALPHA_SCALE) */
	u32	beta;		/* Multiplicative decrease (scaled by BETA_SCALE) */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of consecutive rtt measurements below threshold */
};
| 55 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 56 | static void rtt_reset(struct sock *sk) |
| 57 | { |
| 58 | struct tcp_sock *tp = tcp_sk(sk); |
| 59 | struct illinois *ca = inet_csk_ca(sk); |
| 60 | |
| 61 | ca->end_seq = tp->snd_nxt; |
| 62 | ca->cnt_rtt = 0; |
| 63 | ca->sum_rtt = 0; |
| 64 | |
| 65 | /* TODO: age max_rtt? */ |
| 66 | } |
| 67 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 68 | static void tcp_illinois_init(struct sock *sk) |
| 69 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 70 | struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 71 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 72 | ca->alpha = ALPHA_MAX; |
| 73 | ca->beta = BETA_BASE; |
| 74 | ca->base_rtt = 0x7fffffff; |
| 75 | ca->max_rtt = 0; |
| 76 | |
| 77 | ca->acked = 0; |
| 78 | ca->rtt_low = 0; |
| 79 | ca->rtt_above = 0; |
| 80 | |
| 81 | rtt_reset(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 82 | } |
| 83 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 84 | /* Measure RTT for each ack. */ |
Stephen Hemminger | 30cfd0b | 2007-07-25 23:49:34 -0700 | [diff] [blame] | 85 | static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 86 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 87 | struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | 164891a | 2007-04-23 22:26:16 -0700 | [diff] [blame] | 88 | |
| 89 | ca->acked = pkts_acked; |
| 90 | |
Stephen Hemminger | 30cfd0b | 2007-07-25 23:49:34 -0700 | [diff] [blame] | 91 | /* dup ack, no rtt sample */ |
| 92 | if (rtt < 0) |
Ilpo Järvinen | b9ce204 | 2007-06-15 15:08:43 -0700 | [diff] [blame] | 93 | return; |
| 94 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 95 | /* ignore bogus values, this prevents wraparound in alpha math */ |
| 96 | if (rtt > RTT_MAX) |
| 97 | rtt = RTT_MAX; |
| 98 | |
| 99 | /* keep track of minimum RTT seen so far */ |
| 100 | if (ca->base_rtt > rtt) |
| 101 | ca->base_rtt = rtt; |
| 102 | |
| 103 | /* and max */ |
| 104 | if (ca->max_rtt < rtt) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 105 | ca->max_rtt = rtt; |
| 106 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 107 | ++ca->cnt_rtt; |
| 108 | ca->sum_rtt += rtt; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 109 | } |
| 110 | |
/* Maximum queuing delay: worst RTT seen minus the estimated
 * propagation delay (minimum RTT).
 */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 116 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 117 | /* Average queuing delay */ |
| 118 | static inline u32 avg_delay(const struct illinois *ca) |
| 119 | { |
| 120 | u64 t = ca->sum_rtt; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 121 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 122 | do_div(t, ca->cnt_rtt); |
| 123 | return t - ca->base_rtt; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 124 | } |
| 125 | |
| 126 | /* |
| 127 | * Compute value of alpha used for additive increase. |
| 128 | * If small window then use 1.0, equivalent to Reno. |
| 129 | * |
| 130 | * For larger windows, adjust based on average delay. |
| 131 | * A. If average delay is at minimum (we are uncongested), |
| 132 | * then use large alpha (10.0) to increase faster. |
| 133 | * B. If average delay is at maximum (getting congested) |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 134 | * then use small alpha (0.3) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 135 | * |
| 136 | * The result is a convex window growth curve. |
| 137 | */ |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 138 | static u32 alpha(struct illinois *ca, u32 da, u32 dm) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 139 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 140 | u32 d1 = dm / 100; /* Low threshold */ |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 141 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 142 | if (da <= d1) { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 143 | /* If never got out of low delay zone, then use max */ |
| 144 | if (!ca->rtt_above) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 145 | return ALPHA_MAX; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 146 | |
| 147 | /* Wait for 5 good RTT's before allowing alpha to go alpha max. |
| 148 | * This prevents one good RTT from causing sudden window increase. |
| 149 | */ |
| 150 | if (++ca->rtt_low < theta) |
| 151 | return ca->alpha; |
| 152 | |
| 153 | ca->rtt_low = 0; |
| 154 | ca->rtt_above = 0; |
| 155 | return ALPHA_MAX; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 156 | } |
| 157 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 158 | ca->rtt_above = 1; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 159 | |
| 160 | /* |
| 161 | * Based on: |
| 162 | * |
| 163 | * (dm - d1) amin amax |
| 164 | * k1 = ------------------- |
| 165 | * amax - amin |
| 166 | * |
| 167 | * (dm - d1) amin |
| 168 | * k2 = ---------------- - d1 |
| 169 | * amax - amin |
| 170 | * |
| 171 | * k1 |
| 172 | * alpha = ---------- |
| 173 | * k2 + da |
| 174 | */ |
| 175 | |
| 176 | dm -= d1; |
| 177 | da -= d1; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 178 | return (dm * ALPHA_MAX) / |
| 179 | (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 180 | } |
| 181 | |
| 182 | /* |
| 183 | * Beta used for multiplicative decrease. |
| 184 | * For small window sizes returns same value as Reno (0.5) |
| 185 | * |
| 186 | * If delay is small (10% of max) then beta = 1/8 |
| 187 | * If delay is up to 80% of max then beta = 1/2 |
| 188 | * In between is a linear function |
| 189 | */ |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 190 | static u32 beta(u32 da, u32 dm) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 191 | { |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 192 | u32 d2, d3; |
| 193 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 194 | d2 = dm / 10; |
| 195 | if (da <= d2) |
| 196 | return BETA_MIN; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 197 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 198 | d3 = (8 * dm) / 10; |
| 199 | if (da >= d3 || d3 <= d2) |
| 200 | return BETA_MAX; |
| 201 | |
| 202 | /* |
| 203 | * Based on: |
| 204 | * |
| 205 | * bmin d3 - bmax d2 |
| 206 | * k3 = ------------------- |
| 207 | * d3 - d2 |
| 208 | * |
| 209 | * bmax - bmin |
| 210 | * k4 = ------------- |
| 211 | * d3 - d2 |
| 212 | * |
| 213 | * b = k3 + k4 da |
| 214 | */ |
| 215 | return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da) |
| 216 | / (d3 - d2); |
| 217 | } |
| 218 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 219 | /* Update alpha and beta values once per RTT */ |
| 220 | static void update_params(struct sock *sk) |
| 221 | { |
| 222 | struct tcp_sock *tp = tcp_sk(sk); |
| 223 | struct illinois *ca = inet_csk_ca(sk); |
| 224 | |
| 225 | if (tp->snd_cwnd < win_thresh) { |
| 226 | ca->alpha = ALPHA_BASE; |
| 227 | ca->beta = BETA_BASE; |
| 228 | } else if (ca->cnt_rtt > 0) { |
| 229 | u32 dm = max_delay(ca); |
| 230 | u32 da = avg_delay(ca); |
| 231 | |
| 232 | ca->alpha = alpha(ca, da, dm); |
| 233 | ca->beta = beta(da, dm); |
| 234 | } |
| 235 | |
| 236 | rtt_reset(sk); |
| 237 | } |
| 238 | |
| 239 | /* |
| 240 | * In case of loss, reset to default values |
| 241 | */ |
| 242 | static void tcp_illinois_state(struct sock *sk, u8 new_state) |
| 243 | { |
| 244 | struct illinois *ca = inet_csk_ca(sk); |
| 245 | |
| 246 | if (new_state == TCP_CA_Loss) { |
| 247 | ca->alpha = ALPHA_BASE; |
| 248 | ca->beta = BETA_BASE; |
| 249 | ca->rtt_low = 0; |
| 250 | ca->rtt_above = 0; |
| 251 | rtt_reset(sk); |
| 252 | } |
| 253 | } |
| 254 | |
/*
 * Increase window in response to successful acknowledgment.
 * Uses alpha (recomputed once per RTT) to scale the Reno-style
 * one-per-window increase: cwnd grows by roughly alpha per RTT.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	/* end of the current RTT interval: refresh alpha/beta */
	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC2861 only increase cwnd if fully utilized */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In slow start */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);

	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		/* consume the sample; count at least one packet per ACK */
		ca->acked = 1;

		/* This is close approximation of:
		 * tp->snd_cwnd += alpha/tp->snd_cwnd
		 * (delta is snd_cwnd_cnt scaled by alpha, >>ALPHA_SHIFT to
		 * undo the fixed-point scaling)
		 */
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32) tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}
| 292 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 293 | static u32 tcp_illinois_ssthresh(struct sock *sk) |
| 294 | { |
| 295 | struct tcp_sock *tp = tcp_sk(sk); |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 296 | struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 297 | |
| 298 | /* Multiplicative decrease */ |
Stephen Hemminger | a357dde | 2007-11-30 01:10:55 +1100 | [diff] [blame] | 299 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 300 | } |
| 301 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 302 | |
| 303 | /* Extract info for Tcp socket info provided via netlink. */ |
| 304 | static void tcp_illinois_info(struct sock *sk, u32 ext, |
| 305 | struct sk_buff *skb) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 306 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 307 | const struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 308 | |
| 309 | if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { |
| 310 | struct tcpvegas_info info = { |
| 311 | .tcpv_enabled = 1, |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 312 | .tcpv_rttcnt = ca->cnt_rtt, |
| 313 | .tcpv_minrtt = ca->base_rtt, |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 314 | }; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 315 | |
Jesper Dangaard Brouer | 8f363b7 | 2012-10-31 02:45:32 +0000 | [diff] [blame] | 316 | if (info.tcpv_rttcnt > 0) { |
| 317 | u64 t = ca->sum_rtt; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 318 | |
Jesper Dangaard Brouer | 8f363b7 | 2012-10-31 02:45:32 +0000 | [diff] [blame] | 319 | do_div(t, info.tcpv_rttcnt); |
| 320 | info.tcpv_rtt = t; |
| 321 | } |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 322 | nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); |
| 323 | } |
| 324 | } |
| 325 | |
/* Hook table registered with the TCP congestion-control framework */
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};
| 337 | |
/* Module entry point: register the algorithm with the TCP stack */
static int __init tcp_illinois_register(void)
{
	/* private state must fit in the space the ICSK reserves
	 * for congestion-control modules
	 */
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}
| 343 | |
/* Module exit point: remove the algorithm from the TCP stack */
static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}
| 348 | |
module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

/* Standard module metadata */
MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");