#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

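/* One cache entry per remote peer address, chained per hash bucket and
 * freed via RCU.  tcpm_lock is a bitmask of TCP_METRIC_* indices whose
 * values were locked by the route (RTAX_LOCK) and must not be changed.
 */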
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

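/* Metric accessors.  RTT and RTTVAR are stored in milliseconds but mostly
 * used in jiffies, hence the conversion helpers.
 */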
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

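/* (Re)initialise a metrics block from the route: record which metrics the
 * dst has locked, copy in the raw metric values, and reset the timewait
 * timestamp state.  Fast Open state is cleared only for fresh entries.
 */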
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

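/* Allocate a metrics block for @addr, or, when asked to reclaim, recycle
 * the entry with the oldest stamp in the bucket instead of growing the
 * chain.  Runs under tcp_metrics_lock; lookups are protected by RCU.
 */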
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

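/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH chain entries
 * without a match returns the RECLAIM sentinel, telling tcp_get_metrics()
 * to recycle an old entry rather than extend the chain.
 */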
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

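/* Lookup keyed on a request socket's remote address (SYN_RECV path). */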
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = tw->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		break;
#endif
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

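/* Main lookup: hash the peer address and find its metrics block, creating
 * (or reclaiming) one when @create is set.  Returns NULL for unsupported
 * address families or on allocation failure.  Caller holds rcu_read_lock().
 */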
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = sk->sk_v6_daddr;
		hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one.  Otherwise, use EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is nonsense and
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time passed after a "normal" sized
	 * packet is sent until it is ACKed. In normal circumstances sending
	 * small packets forces the peer to delay ACKs, so the calculation is
	 * correct too. The algorithm is adaptive and, provided we follow
	 * specs, it NEVER underestimates RTT. BUT! If the peer plays tricks,
	 * sending "quick acks" long enough to drive the RTT down to a low
	 * value and then abruptly switching to delayed ACKs, expect trouble.
	 */
	if (crtt > tp->srtt) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt >>= 3;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

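/* With @paws_check, reject a request whose timestamp runs behind the one
 * cached from this peer within the last TCP_PAWS_MSL seconds; otherwise
 * report whether the peer is "proven" (cached RTT and timestamp present).
 */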
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

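/* Fast Open state is read on the connect() path without taking the
 * metrics spinlock; a seqlock keeps the mss/cookie/loss fields consistent
 * against concurrent writers.
 */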
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

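/* Generic netlink interface (the "tcp_metrics" family) for dumping,
 * querying and flushing the cache from user space, e.g. via iproute2's
 * "ip tcp_metrics" command.
 */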
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_addr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				 tm->tcpm_addr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_addr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

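/* Parse the peer address attribute (IPv4 or IPv6) of a GET/DEL request.
 * Returns 1 when @optional and no address was supplied (flush-all case).
 */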
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	struct nlattr *a;

	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 0);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

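/* RCU dereference helpers valid while the genl mutex is held (and, for
 * the locked variant, tcp_metrics_lock as well).
 */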
#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm;
	     pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			*pp = tm->tcpm_next;
			break;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!tm)
		return -ESRCH;
	kfree_rcu(tm, rcu_head);
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

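/* Boot-time sizing of the per-netns metrics hash, e.g. passing
 * "tcpmhash_entries=16384" on the kernel command line.  When unset,
 * the size is picked below from the amount of system RAM.
 */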
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}