/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>

#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX
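
/*
 * alpha and beta are fixed point: alpha carries ALPHA_SHIFT (7) fractional
 * bits and beta carries BETA_SHIFT (6), so ALPHA_BASE = 128 encodes 1.0,
 * ALPHA_MIN = 38 encodes ~0.3 and BETA_MAX = 32 encodes 0.5.  A scaled
 * value is applied by multiplying and shifting back down, as in
 * (snd_cwnd * beta) >> BETA_SHIFT in tcp_illinois_ssthresh().  RTT_MAX
 * caps each sample so products like dm * ALPHA_MAX in alpha() cannot
 * wrap a u32 (U32_MAX / 1280 is roughly 3.3 seconds in usec).
 */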

static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTTs before full growth");

/* TCP Illinois Parameters */
struct illinois {
	u64	sum_rtt;	/* sum of rtts measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u32	loss_cwnd;	/* cwnd on loss */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtt measurements below threshold */
};
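
/* Private CA state lives in inet_csk_ca(sk), so struct illinois must fit
 * in ICSK_CA_PRIV_SIZE (checked in tcp_illinois_register() below).
 */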

static void rtt_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->end_seq = tp->snd_nxt;
	ca->cnt_rtt = 0;
	ca->sum_rtt = 0;

	/* TODO: age max_rtt? */
}

static void tcp_illinois_init(struct sock *sk)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->alpha = ALPHA_MAX;
	ca->beta = BETA_BASE;
	ca->base_rtt = 0x7fffffff;
	ca->max_rtt = 0;

	ca->acked = 0;
	ca->rtt_low = 0;
	ca->rtt_above = 0;

	rtt_reset(sk);
}

/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
{
	struct illinois *ca = inet_csk_ca(sk);
	s32 rtt_us = sample->rtt_us;

	ca->acked = sample->pkts_acked;

	/* dup ack, no rtt sample */
	if (rtt_us < 0)
		return;

	/* ignore bogus values, this prevents wraparound in alpha math */
	if (rtt_us > RTT_MAX)
		rtt_us = RTT_MAX;

	/* keep track of minimum RTT seen so far */
	if (ca->base_rtt > rtt_us)
		ca->base_rtt = rtt_us;

	/* and max */
	if (ca->max_rtt < rtt_us)
		ca->max_rtt = rtt_us;

	++ca->cnt_rtt;
	ca->sum_rtt += rtt_us;
}

/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}

/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}
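
/*
 * Worked example: with base_rtt = 10000 usec, max_rtt = 20000 usec and
 * three samples summing to 36000 usec, max_delay() is 10000 usec and
 * avg_delay() is 36000/3 - 10000 = 2000 usec of average queueing delay.
 */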

/*
 * Compute value of alpha used for additive increase.
 * If small window then use 1.0, equivalent to Reno.
 *
 * For larger windows, adjust based on average delay.
 * A. If average delay is at minimum (we are uncongested),
 *    then use large alpha (10.0) to increase faster.
 * B. If average delay is at maximum (getting congested)
 *    then use small alpha (0.3)
 *
 * The result is a convex window growth curve.
 */
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
	u32 d1 = dm / 100;	/* Low threshold */

	if (da <= d1) {
		/* If never got out of low delay zone, then use max */
		if (!ca->rtt_above)
			return ALPHA_MAX;
		/* Wait for theta (default 5) good RTTs before letting alpha
		 * climb back to alpha max.  This prevents one good RTT from
		 * causing a sudden window increase.
		 */
		if (++ca->rtt_low < theta)
			return ca->alpha;

		ca->rtt_low = 0;
		ca->rtt_above = 0;
		return ALPHA_MAX;
	}

	ca->rtt_above = 1;

	/*
	 * Based on:
	 *
	 *      (dm - d1) amin amax
	 * k1 = -------------------
	 *         amax - amin
	 *
	 *       (dm - d1) amin
	 * k2 = ---------------- - d1
	 *        amax - amin
	 *
	 *             k1
	 * alpha = ----------
	 *          k2 + da
	 */

	dm -= d1;
	da -= d1;
	return (dm * ALPHA_MAX) /
		(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}
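
/*
 * Worked alpha() example: dm = 10000 usec gives d1 = 100.  For da = 5000,
 * dm and da become 9900 and 4900 after subtracting d1, and the result is
 * (9900 * 1280) / (9900 + 4900 * 1242 / 38) = 12672000 / 170052 ~= 74,
 * i.e. alpha ~= 74/128 ~= 0.58, well down the curve from 10.0 toward 0.3.
 */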

/*
 * Beta used for multiplicative decrease.
 * For small window sizes it returns the same value as Reno (0.5).
 *
 * If delay is small (10% of max) then beta = 1/8
 * If delay is up to 80% of max then beta = 1/2
 * In between is a linear function
 */
static u32 beta(u32 da, u32 dm)
{
	u32 d2, d3;

	d2 = dm / 10;
	if (da <= d2)
		return BETA_MIN;

	d3 = (8 * dm) / 10;
	if (da >= d3 || d3 <= d2)
		return BETA_MAX;

	/*
	 * Based on:
	 *
	 *       bmin d3 - bmax d2
	 * k3 = -------------------
	 *           d3 - d2
	 *
	 *       bmax - bmin
	 * k4 = -------------
	 *         d3 - d2
	 *
	 * b = k3 + k4 da
	 */
	return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
		/ (d3 - d2);
}
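
/*
 * Worked beta() example: dm = 10000 usec gives d2 = 1000 and d3 = 8000.
 * At the midpoint da = 4500 the result is
 * (8 * 8000 - 32 * 1000 + 24 * 4500) / 7000 = 140000 / 7000 = 20,
 * i.e. beta = 20/64 = 0.3125, halfway between 0.125 and 0.5 as expected.
 */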

/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (tp->snd_cwnd < win_thresh) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
	} else if (ca->cnt_rtt > 0) {
		u32 dm = max_delay(ca);
		u32 da = avg_delay(ca);

		ca->alpha = alpha(ca, da, dm);
		ca->beta = beta(da, dm);
	}

	rtt_reset(sk);
}

/*
 * In case of loss, reset to default values
 */
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
	struct illinois *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
		ca->rtt_low = 0;
		ca->rtt_above = 0;
		rtt_reset(sk);
	}
}

/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);
	/* RFC 2861: only increase cwnd if it is fully utilized */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In slow start */
	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);

	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;
		/* This is a close approximation of:
		 * tp->snd_cwnd += alpha/tp->snd_cwnd
		 */
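		/* E.g. with alpha = 10.0 (1280) and cwnd = 100, every ten
		 * packets acked make delta reach 100, so cwnd grows by one
		 * about ten times per round trip: roughly alpha per RTT.
		 */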
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32)tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}

static u32 tcp_illinois_ssthresh(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	/* Multiplicative decrease */
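	/* e.g. cwnd = 100 with beta = 0.5 (32) shrinks the window to 50 */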
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
}

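/* Undo a spurious cwnd reduction: return to the larger of the current
 * window and the one saved when loss was signalled.
 */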
static u32 tcp_illinois_cwnd_undo(struct sock *sk)
{
	const struct illinois *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

/* Extract info for TCP socket info provided via netlink. */
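/* Illinois has no diag struct of its own, so it reports through the
 * tcpvegas_info fields: sample count, minimum rtt and the current average.
 */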
static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
				union tcp_cc_info *info)
{
	const struct illinois *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt = ca->cnt_rtt;
		info->vegas.tcpv_minrtt = ca->base_rtt;
		info->vegas.tcpv_rtt = 0;

		if (info->vegas.tcpv_rttcnt > 0) {
			u64 t = ca->sum_rtt;

			do_div(t, info->vegas.tcpv_rttcnt);
			info->vegas.tcpv_rtt = t;
		}
		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}

static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.undo_cwnd	= tcp_illinois_cwnd_undo,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};

static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}

static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}

module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");