#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>

int sysctl_tcp_nometrics_save __read_mostly;

enum tcp_metric_index {
	TCP_METRIC_RTT,
	TCP_METRIC_RTTVAR,
	TCP_METRIC_SSTHRESH,
	TCP_METRIC_CWND,
	TCP_METRIC_REORDERING,

	/* Always last. */
	TCP_METRIC_MAX,
};
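
/* Each metric lives at its enum index in tcpm_vals[], and the same index
 * is used as a bit position in tcpm_lock for metrics the route has
 * locked against updates (see tcpm_suck_dst() and tcp_metric_locked()).
 */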

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX];
	struct tcp_fastopen_metrics	tcpm_fastopen;
};
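
/* One tcp_metrics_block caches the state learned about a single peer
 * address.  Blocks are chained per hash bucket and protected by RCU;
 * tcpm_stamp records the last write, which both ages out stale data
 * (tcpm_check_stamp()) and picks the reclaim victim in tcpm_new().
 */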

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

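/* Prime a metrics block from the routing cache entry: copy the raw
 * RTAX_* metrics out of the dst and record, as bits in tcpm_lock, which
 * of them the route has locked against updates.  Timestamp and Fast
 * Open state start out cleared.
 */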
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	u32 val;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	tm->tcpm_fastopen.mss = 0;
	tm->tcpm_fastopen.syn_loss = 0;
	tm->tcpm_fastopen.cookie.len = 0;
}

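/* Allocate a new metrics block for @addr or, when the bucket chain is
 * already at full depth (@reclaim), recycle the least recently updated
 * block on the chain in place.  Runs under tcp_metrics_lock; new blocks
 * are published with rcu_assign_pointer() so that lockless readers only
 * ever see a fully initialised block.
 */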
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;
	tm->tcpm_stamp = jiffies;

	tcpm_suck_dst(tm, dst);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

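/* A cached entry that has not been written for an hour is considered
 * stale and is re-primed from the current route metrics on its next
 * lookup.
 */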
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

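/* __tcp_get_metrics() returns the matching block, NULL when the chain
 * still has room for a new entry, or the TCP_METRICS_RECLAIM_PTR
 * sentinel when the chain is already TCP_METRICS_RECLAIM_DEPTH deep and
 * the caller should recycle the oldest block instead of allocating.
 */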
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
		break;
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated RTT is larger than the stored one,
	 * store the new one.  Otherwise, use an EWMA.  Remember, RTT
	 * overestimation is always better than underestimation.
	 */
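	/* Concretely, with a stored value of 120 and a measured srtt of
	 * 100 (same units): m = 20, so the stored value decays by
	 * m >> 3, i.e. an EWMA with gain 1/8.  A measured srtt of 130
	 * gives m <= 0 and simply replaces the stored value.
	 */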
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is not reliable
		 * and ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val;

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS.  Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	val = tcp_metric_get(tm, TCP_METRIC_RTT);
	if (val == 0 || tp->srtt == 0) {
		rcu_read_unlock();
		goto reset;
	}
	/* The initial RTT is determined from the SYN,SYN-ACK exchange.
	 * That segment is small and the measured rtt may appear much
	 * less than the real one.  Use per-dst memory to make it more
	 * realistic.
	 *
	 * A bit of theory.  RTT is the time passed after a "normal" sized
	 * packet is sent until it is ACKed.  In normal circumstances
	 * sending small packets forces the peer to delay ACKs, so the
	 * calculation is correct there too.  The algorithm is adaptive
	 * and, provided we follow the specs, it NEVER underestimates
	 * RTT.  BUT!  If a peer plays "quick ack" tricks for long enough
	 * to drive the RTT down to a low value and then abruptly starts
	 * delaying ACKs, expect trouble.
	 */
	val = msecs_to_jiffies(val);
	if (val > tp->srtt) {
		tp->srtt = val;
		tp->rtt_seq = tp->snd_nxt;
	}
	val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
	if (val > tp->mdev) {
		tp->mdev = val;
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	rcu_read_unlock();

	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298 5.7: We've failed to get a valid RTT sample
		 * from the 3WHS.  This is most likely due to a
		 * retransmission, possibly a spurious one.  Reset the
		 * RTO back to 3 secs from the more aggressive 1 sec to
		 * avoid more spurious retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298's more aggressive 1 sec
	 * initRTO, we only reset cwnd when more than one SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

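/* Decide whether a peer is "proven".  With @paws_check, reject the
 * request when we hold a fresh timestamp for the peer (seen within
 * TCP_PAWS_MSL seconds) that is more than TCP_PAWS_WINDOW ahead of the
 * one the peer just sent; without it, accept only peers for which we
 * have both an RTT sample and a timestamp on record.
 */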
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

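/* Restore ts_recent on a new connection from the metrics cache, so that
 * PAWS checks carry over across connections to a destination seen
 * within the last TCP_PAWS_MSL seconds.
 */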
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea.  Save the last timestamp seen from this destination and
 * hold it for at least the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

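/* TCP Fast Open state is read and written under its own seqlock rather
 * than tcp_metrics_lock: readers in tcp_fastopen_cache_get() simply
 * retry if they race with a writer, so the SYN path never blocks on
 * the cache.
 */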
643
644void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
Yuchung Chengaab48742012-07-19 06:43:10 +0000645 struct tcp_fastopen_cookie *cookie,
646 int *syn_loss, unsigned long *last_syn_loss)
Yuchung Cheng1fe4c482012-07-19 06:43:06 +0000647{
648 struct tcp_metrics_block *tm;
649
650 rcu_read_lock();
651 tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
652 if (tm) {
653 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
654 unsigned int seq;
655
656 do {
657 seq = read_seqbegin(&fastopen_seqlock);
658 if (tfom->mss)
659 *mss = tfom->mss;
660 *cookie = tfom->cookie;
Yuchung Chengaab48742012-07-19 06:43:10 +0000661 *syn_loss = tfom->syn_loss;
662 *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
Yuchung Cheng1fe4c482012-07-19 06:43:06 +0000663 } while (read_seqretry(&fastopen_seqlock, seq));
664 }
665 rcu_read_unlock();
666}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		tfom->mss = mss;
		if (cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

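/* Size the per-netns hash table at boot unless tcpmhash_entries= was
 * given on the kernel command line (e.g. tcpmhash_entries=16384).  As
 * an illustration, on a box with at least 128K pages (512 MB with 4 KB
 * pages) the default is 16K slots, so tcp_metrics_hash_log = 14 and,
 * with one pointer per bucket, the table costs 128 KB on 64-bit.
 */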
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	register_pernet_subsys(&tcp_net_metrics_ops);
}