#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>

int sysctl_tcp_nometrics_save __read_mostly;

enum tcp_metric_index {
	TCP_METRIC_RTT,
	TCP_METRIC_RTTVAR,
	TCP_METRIC_SSTHRESH,
	TCP_METRIC_CWND,
	TCP_METRIC_REORDERING,

	/* Always last. */
	TCP_METRIC_MAX,
};

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

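/* One cache entry per remote peer.  Entries hang off a per-netns hash
 * table in RCU-protected chains and carry both the locked-metric bit
 * mask and the raw metric values copied from the routing cache.
 */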
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX];
	struct tcp_fastopen_metrics	tcpm_fastopen;
};

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

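/* (Re)initialize a cache entry from the route: record which metrics the
 * route has locked, snapshot the raw metric values, and clear the
 * timestamp and Fast Open state so stale data is not carried over.
 */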
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	u32 val;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	tm->tcpm_fastopen.mss = 0;
	tm->tcpm_fastopen.syn_loss = 0;
	tm->tcpm_fastopen.cookie.len = 0;
}

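/* Allocate a new cache entry, or, when asked to reclaim, recycle the
 * entry with the oldest stamp on this hash chain.  Runs under
 * tcp_metrics_lock; a recycled entry is rewritten in place and stays
 * linked where it was, so only freshly allocated entries need to be
 * published with rcu_assign_pointer().
 */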
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;
	tm->tcpm_stamp = jiffies;

	tcpm_suck_dst(tm, dst);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

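/* Encode the outcome of a chain walk: a hit returns the entry itself, a
 * miss on an overlong chain returns the TCP_METRICS_RECLAIM_PTR sentinel
 * (telling the caller to recycle an entry rather than grow the chain),
 * and a miss on a short chain returns NULL (plain allocation is fine).
 */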
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
		break;
	default:
		return NULL;
	}

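	/* Mix the high bytes of the hash down into the low bits, since
	 * only the bits covered by tcp_metrics_hash_mask select the
	 * bucket below.
	 */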
	hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

	net = dev_net(dst->dev);
	hash &= net->ipv4.tcp_metrics_hash_mask;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
		break;
	default:
		return NULL;
	}

	hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

	net = twsk_net(tw);
	hash &= net->ipv4.tcp_metrics_hash_mask;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

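/* Look up the cache entry for this socket's peer, optionally creating
 * (or reclaiming) one on a miss.  Must be called with the RCU read lock
 * held; the returned pointer is only valid inside that read-side
 * critical section.
 */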
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	hash ^= (hash >> 24) ^ (hash >> 16) ^ (hash >> 8);

	net = dev_net(dst->dev);
	hash &= net->ipv4.tcp_metrics_hash_mask;

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one.  Otherwise, use EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}


/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val;

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS.  Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	val = tcp_metric_get(tm, TCP_METRIC_RTT);
	if (val == 0 || tp->srtt == 0) {
		rcu_read_unlock();
		goto reset;
	}
	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets forces peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops doing it and starts to delay
	 * ACKs, wait for troubles.
	 */
	val = msecs_to_jiffies(val);
	if (val > tp->srtt) {
		tp->srtt = val;
		tp->rtt_seq = tp->snd_nxt;
	}
	val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
	if (val > tp->mdev) {
		tp->mdev = val;
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	rcu_read_unlock();

	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

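/* Decide whether a peer is trustworthy.  With paws_check, refuse only
 * when we hold a fresh timestamp for the peer (younger than
 * TCP_PAWS_MSL) that is ahead of the timestamp in the request by more
 * than TCP_PAWS_WINDOW.  Without paws_check, the peer is proven only
 * if we have both an RTT sample and a timestamp on record.
 */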
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

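/* The Fast Open fields span several words, so readers and the writer
 * synchronize through a seqlock: lookups re-read if they race with an
 * update, and never block the updater.
 */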
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		tfom->mss = mss;
		if (cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static unsigned long tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

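/* Size the per-netns hash table: tcpmhash_entries= on the command line
 * wins, otherwise pick 16K or 8K slots based on total RAM.  The bucket
 * mask below assumes the slot count is a power of two, so a boot-time
 * override should honor that.
 */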
static int __net_init tcp_net_metrics_init(struct net *net)
{
	int slots, size;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	size = slots * sizeof(struct tcpm_hash_bucket);

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	net->ipv4.tcp_metrics_hash_mask = (slots - 1);

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	register_pernet_subsys(&tcp_net_metrics_ops);
}
749}