Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 1 | /* |
| 2 | * TCP Illinois congestion control. |
| 3 | * Home page: |
| 4 | * http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html |
| 5 | * |
| 6 | * The algorithm is described in: |
| 7 | * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm |
| 8 | * for High-Speed Networks" |
Justin P. Mattock | 631dd1a | 2010-10-18 11:03:14 +0200 | [diff] [blame] | 9 | * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 10 | * |
| 11 | * Implemented from description in paper and ns-2 simulation. |
| 12 | * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org> |
| 13 | */ |
| 14 | |
| 15 | #include <linux/module.h> |
| 16 | #include <linux/skbuff.h> |
| 17 | #include <linux/inet_diag.h> |
| 18 | #include <asm/div64.h> |
| 19 | #include <net/tcp.h> |
| 20 | |
/* Fixed-point scaling for the additive-increase factor alpha.
 * Values are stored as alpha * ALPHA_SCALE (i.e. 1.0 == 1 << 7).
 */
#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
/* Guard the local fallback so a later kernel-wide U32_MAX definition
 * does not trigger a macro redefinition warning/error.
 */
#ifndef U32_MAX
#define U32_MAX		((u32)~0U)
#endif
/* Largest RTT sample we accept; anything bigger would overflow u32
 * arithmetic when multiplied by ALPHA_MAX in alpha().
 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

/* Fixed-point scaling for the multiplicative-decrease factor beta
 * (0.5 == 1 << 5 with BETA_SHIFT == 6).
 */
#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)	/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)	/* 0.5 */
#define BETA_BASE	BETA_MAX
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 34 | |
/* Below this congestion-window size the algorithm behaves like Reno;
 * adaptive alpha/beta sizing only kicks in for larger windows.
 */
static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

/* Number of consecutive low-delay RTTs required before alpha is allowed
 * to jump back to ALPHA_MAX (see alpha()).
 */
static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 42 | |
| 43 | /* TCP Illinois Parameters */ |
/* TCP Illinois Parameters
 *
 * Per-connection private state, stored in the inet_csk ca-priv area
 * (size is checked against ICSK_CA_PRIV_SIZE at module init).
 */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase (scaled by ALPHA_SCALE) */
	u32	beta;		/* Multiplicative decrease (scaled by BETA_SCALE) */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of consecutive rtt measurements below threshold */
};
| 56 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 57 | static void rtt_reset(struct sock *sk) |
| 58 | { |
| 59 | struct tcp_sock *tp = tcp_sk(sk); |
| 60 | struct illinois *ca = inet_csk_ca(sk); |
| 61 | |
| 62 | ca->end_seq = tp->snd_nxt; |
| 63 | ca->cnt_rtt = 0; |
| 64 | ca->sum_rtt = 0; |
| 65 | |
| 66 | /* TODO: age max_rtt? */ |
| 67 | } |
| 68 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 69 | static void tcp_illinois_init(struct sock *sk) |
| 70 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 71 | struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 72 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 73 | ca->alpha = ALPHA_MAX; |
| 74 | ca->beta = BETA_BASE; |
| 75 | ca->base_rtt = 0x7fffffff; |
| 76 | ca->max_rtt = 0; |
| 77 | |
| 78 | ca->acked = 0; |
| 79 | ca->rtt_low = 0; |
| 80 | ca->rtt_above = 0; |
| 81 | |
| 82 | rtt_reset(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 83 | } |
| 84 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 85 | /* Measure RTT for each ack. */ |
Stephen Hemminger | 30cfd0b | 2007-07-25 23:49:34 -0700 | [diff] [blame] | 86 | static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 87 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 88 | struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | 164891a | 2007-04-23 22:26:16 -0700 | [diff] [blame] | 89 | |
| 90 | ca->acked = pkts_acked; |
| 91 | |
Stephen Hemminger | 30cfd0b | 2007-07-25 23:49:34 -0700 | [diff] [blame] | 92 | /* dup ack, no rtt sample */ |
| 93 | if (rtt < 0) |
Ilpo Järvinen | b9ce204 | 2007-06-15 15:08:43 -0700 | [diff] [blame] | 94 | return; |
| 95 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 96 | /* ignore bogus values, this prevents wraparound in alpha math */ |
| 97 | if (rtt > RTT_MAX) |
| 98 | rtt = RTT_MAX; |
| 99 | |
| 100 | /* keep track of minimum RTT seen so far */ |
| 101 | if (ca->base_rtt > rtt) |
| 102 | ca->base_rtt = rtt; |
| 103 | |
| 104 | /* and max */ |
| 105 | if (ca->max_rtt < rtt) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 106 | ca->max_rtt = rtt; |
| 107 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 108 | ++ca->cnt_rtt; |
| 109 | ca->sum_rtt += rtt; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 110 | } |
| 111 | |
/* Maximum queuing delay: worst RTT observed minus the propagation
 * (base) RTT, in usec.
 */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 117 | |
/* Average queuing delay over the current interval, in usec.
 * Caller must ensure cnt_rtt > 0 (update_params checks this);
 * do_div() divides the u64 sum in place and is used because plain
 * 64-bit division is unavailable on some 32-bit architectures.
 */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}
| 126 | |
| 127 | /* |
| 128 | * Compute value of alpha used for additive increase. |
| 129 | * If small window then use 1.0, equivalent to Reno. |
| 130 | * |
| 131 | * For larger windows, adjust based on average delay. |
| 132 | * A. If average delay is at minimum (we are uncongested), |
| 133 | * then use large alpha (10.0) to increase faster. |
| 134 | * B. If average delay is at maximum (getting congested) |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 135 | * then use small alpha (0.3) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 136 | * |
| 137 | * The result is a convex window growth curve. |
| 138 | */ |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 139 | static u32 alpha(struct illinois *ca, u32 da, u32 dm) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 140 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 141 | u32 d1 = dm / 100; /* Low threshold */ |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 142 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 143 | if (da <= d1) { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 144 | /* If never got out of low delay zone, then use max */ |
| 145 | if (!ca->rtt_above) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 146 | return ALPHA_MAX; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 147 | |
| 148 | /* Wait for 5 good RTT's before allowing alpha to go alpha max. |
| 149 | * This prevents one good RTT from causing sudden window increase. |
| 150 | */ |
| 151 | if (++ca->rtt_low < theta) |
| 152 | return ca->alpha; |
| 153 | |
| 154 | ca->rtt_low = 0; |
| 155 | ca->rtt_above = 0; |
| 156 | return ALPHA_MAX; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 157 | } |
| 158 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 159 | ca->rtt_above = 1; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 160 | |
| 161 | /* |
| 162 | * Based on: |
| 163 | * |
| 164 | * (dm - d1) amin amax |
| 165 | * k1 = ------------------- |
| 166 | * amax - amin |
| 167 | * |
| 168 | * (dm - d1) amin |
| 169 | * k2 = ---------------- - d1 |
| 170 | * amax - amin |
| 171 | * |
| 172 | * k1 |
| 173 | * alpha = ---------- |
| 174 | * k2 + da |
| 175 | */ |
| 176 | |
| 177 | dm -= d1; |
| 178 | da -= d1; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 179 | return (dm * ALPHA_MAX) / |
| 180 | (dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 181 | } |
| 182 | |
| 183 | /* |
| 184 | * Beta used for multiplicative decrease. |
| 185 | * For small window sizes returns same value as Reno (0.5) |
| 186 | * |
| 187 | * If delay is small (10% of max) then beta = 1/8 |
| 188 | * If delay is up to 80% of max then beta = 1/2 |
| 189 | * In between is a linear function |
| 190 | */ |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 191 | static u32 beta(u32 da, u32 dm) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 192 | { |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 193 | u32 d2, d3; |
| 194 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 195 | d2 = dm / 10; |
| 196 | if (da <= d2) |
| 197 | return BETA_MIN; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 198 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 199 | d3 = (8 * dm) / 10; |
| 200 | if (da >= d3 || d3 <= d2) |
| 201 | return BETA_MAX; |
| 202 | |
| 203 | /* |
| 204 | * Based on: |
| 205 | * |
| 206 | * bmin d3 - bmax d2 |
| 207 | * k3 = ------------------- |
| 208 | * d3 - d2 |
| 209 | * |
| 210 | * bmax - bmin |
| 211 | * k4 = ------------- |
| 212 | * d3 - d2 |
| 213 | * |
| 214 | * b = k3 + k4 da |
| 215 | */ |
| 216 | return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da) |
| 217 | / (d3 - d2); |
| 218 | } |
| 219 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 220 | /* Update alpha and beta values once per RTT */ |
| 221 | static void update_params(struct sock *sk) |
| 222 | { |
| 223 | struct tcp_sock *tp = tcp_sk(sk); |
| 224 | struct illinois *ca = inet_csk_ca(sk); |
| 225 | |
| 226 | if (tp->snd_cwnd < win_thresh) { |
| 227 | ca->alpha = ALPHA_BASE; |
| 228 | ca->beta = BETA_BASE; |
| 229 | } else if (ca->cnt_rtt > 0) { |
| 230 | u32 dm = max_delay(ca); |
| 231 | u32 da = avg_delay(ca); |
| 232 | |
| 233 | ca->alpha = alpha(ca, da, dm); |
| 234 | ca->beta = beta(da, dm); |
| 235 | } |
| 236 | |
| 237 | rtt_reset(sk); |
| 238 | } |
| 239 | |
| 240 | /* |
| 241 | * In case of loss, reset to default values |
| 242 | */ |
| 243 | static void tcp_illinois_state(struct sock *sk, u8 new_state) |
| 244 | { |
| 245 | struct illinois *ca = inet_csk_ca(sk); |
| 246 | |
| 247 | if (new_state == TCP_CA_Loss) { |
| 248 | ca->alpha = ALPHA_BASE; |
| 249 | ca->beta = BETA_BASE; |
| 250 | ca->rtt_low = 0; |
| 251 | ca->rtt_above = 0; |
| 252 | rtt_reset(sk); |
| 253 | } |
| 254 | } |
| 255 | |
/*
 * Increase window in response to successful acknowledgment.
 *
 * Once per RTT (when the ACK passes the interval's right edge) the
 * adaptive alpha/beta are refreshed.  In congestion avoidance, cwnd
 * grows by approximately alpha packets per RTT, implemented by
 * accumulating acked packets in snd_cwnd_cnt.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
				    u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	/* End of the current RTT interval: refresh alpha/beta. */
	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC2861 only increase cwnd if fully utilized */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In slow start */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);

	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		/* NOTE(review): reset to 1 (not 0) so a later call on the
		 * same ACK still counts one packet — TODO confirm intent.
		 */
		ca->acked = 1;

		/* This is close approximation of:
		 * tp->snd_cwnd += alpha/tp->snd_cwnd
		*/
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			/* Grow by delta/cwnd packets, capped at the clamp. */
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32) tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}
| 294 | |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 295 | static u32 tcp_illinois_ssthresh(struct sock *sk) |
| 296 | { |
| 297 | struct tcp_sock *tp = tcp_sk(sk); |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 298 | struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 299 | |
| 300 | /* Multiplicative decrease */ |
Stephen Hemminger | a357dde | 2007-11-30 01:10:55 +1100 | [diff] [blame] | 301 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 302 | } |
| 303 | |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 304 | |
| 305 | /* Extract info for Tcp socket info provided via netlink. */ |
| 306 | static void tcp_illinois_info(struct sock *sk, u32 ext, |
| 307 | struct sk_buff *skb) |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 308 | { |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 309 | const struct illinois *ca = inet_csk_ca(sk); |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 310 | |
| 311 | if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { |
| 312 | struct tcpvegas_info info = { |
| 313 | .tcpv_enabled = 1, |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 314 | .tcpv_rttcnt = ca->cnt_rtt, |
| 315 | .tcpv_minrtt = ca->base_rtt, |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 316 | }; |
Stephen Hemminger | 65d1b4a | 2007-04-23 22:24:32 -0700 | [diff] [blame] | 317 | |
Jesper Dangaard Brouer | 8f363b7 | 2012-10-31 02:45:32 +0000 | [diff] [blame] | 318 | if (info.tcpv_rttcnt > 0) { |
| 319 | u64 t = ca->sum_rtt; |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 320 | |
Jesper Dangaard Brouer | 8f363b7 | 2012-10-31 02:45:32 +0000 | [diff] [blame] | 321 | do_div(t, info.tcpv_rttcnt); |
| 322 | info.tcpv_rtt = t; |
| 323 | } |
Stephen Hemminger | c462238 | 2007-04-20 17:07:51 -0700 | [diff] [blame] | 324 | nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info); |
| 325 | } |
| 326 | } |
| 327 | |
/* Hook table registered with the TCP congestion-control framework.
 * TCP_CONG_RTT_STAMP requests per-ACK RTT samples for pkts_acked.
 */
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.flags		= TCP_CONG_RTT_STAMP,
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.min_cwnd	= tcp_reno_min_cwnd,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};
| 341 | |
static int __init tcp_illinois_register(void)
{
	/* Private state must fit in the space inet_csk reserves for
	 * congestion-control modules.
	 */
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}
| 347 | |
/* Module exit: remove the algorithm from the framework's list. */
static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}
| 352 | |
/* Standard module entry/exit and metadata. */
module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");