#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

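/* Per-destination TCP Fast Open state: the last cookie and MSS the peer
 * advertised, plus a count of consecutive SYN losses (and when the last
 * one happened) recorded against this destination.
 */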
struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

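/* One cache entry per remote address.  Entries hang off a per-netns
 * hash table (net->ipv4.tcp_metrics_hash) on RCU-protected singly
 * linked chains and are freed through rcu_head.
 */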
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

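/* Metric accessors.  tcpm_lock is a bitmask of TCP_METRIC_* indices
 * that the route has locked (dst_metric_locked()) and that must not be
 * overwritten from live connections.  RTT and RTTVAR are stored in
 * milliseconds; the *_jiffies/*_msecs helpers convert at the boundary.
 */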
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

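/* (Re)initialize a cache entry from the routing metrics of @dst:
 * snapshot the raw RTAX_* values, note which of them the route has
 * locked, and restart the entry's validity stamp.  Fast Open state is
 * wiped only when a brand new entry is set up (fastopen_clear), not
 * when a stale entry is refreshed via tcpm_check_stamp().
 */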
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

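/* Create an entry for @addr under tcp_metrics_lock.  When the caller
 * saw an over-long chain (reclaim), the oldest entry in the bucket is
 * recycled in place instead of allocating, bounding both memory use
 * and per-bucket lookup cost.
 */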
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

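/* Lookup encoding: NULL means the address is simply absent, while the
 * sentinel TCP_METRICS_RECLAIM_PTR means the chain has grown past
 * TCP_METRICS_RECLAIM_DEPTH and tcp_get_metrics() should recycle the
 * bucket's oldest entry instead of extending the chain.
 */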
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
		break;
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

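/* Main lookup for full sockets: hash the peer address into the
 * per-netns table and return the matching entry, creating (or
 * recycling) one when @create is true.  Callers hold rcu_read_lock().
 */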
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt.  Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one.  Otherwise, use EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is meaningless
		 * and ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}


/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS.  Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	if (crtt > tp->srtt) {
		/* The initial RTT (tp->srtt) from the SYN usually doesn't
		 * measure serialization delay on low BW links well, so RTO
		 * may be under-estimated.  Stay conservative and seed RTO
		 * with the RTTs from past data exchanges, using the same
		 * seeding formula as in tcp_rtt_estimator().
		 */
		inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk));
	} else if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS.  This is most likely due to retransmission,
		 * including spurious ones.  Reset the RTO back to 3 secs
		 * from the more aggressive 1 sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298's more aggressive 1 sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

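/* Used on the passive-open path (the IPv4/IPv6 connection-request code,
 * at the time of writing): with paws_check, reject a request whose
 * timestamp would trip PAWS against the cached one; without it, report
 * whether enough history (an RTT sample and a timestamp) exists to
 * treat this peer as "proven".
 */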
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

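/* Seed a new connection's ts_recent from the cached timestamp state,
 * provided the cached value is no older than TCP_PAWS_MSL, so PAWS
 * still works when a fresh connection follows a recycled TIME-WAIT.
 */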
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

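/* The Fast Open fields are multi-word, so they are guarded by a
 * seqlock: writers take it _bh, readers retry until they observe a
 * consistent snapshot without ever blocking the write side.
 */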
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		tfom->mss = mss;
		if (cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

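/* Generic netlink interface ("tcp_metrics" family): TCP_METRICS_CMD_GET
 * looks up or dumps entries, TCP_METRICS_CMD_DEL removes one entry or
 * flushes the whole table.  Userspace access normally goes through
 * iproute2, e.g. (assuming a reasonably recent iproute2):
 *
 *	ip tcp_metrics show
 *	ip tcp_metrics delete 192.0.2.1
 */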
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * they are kept here for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_addr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				tm->tcpm_addr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_addr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

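/* Dump the table in (row, column) order, saving the current position
 * in cb->args[] so an interrupted dump can resume where it left off.
 */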
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	struct nlattr *a;

	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 0);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

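/* Dereference helpers for the genl handlers below: deref_locked_genl()
 * is for walks done with both the genl mutex and tcp_metrics_lock held,
 * deref_genl() for entries already unlinked from the table, where the
 * genl mutex alone suffices.  Both conditions are checked by lockdep.
 */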
#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm;
	     pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			*pp = tm->tcpm_next;
			break;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!tm)
		return -ESRCH;
	kfree_rcu(tm, rcu_head);
	return 0;
}

static struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

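/* The table size may be set at boot, e.g. "tcpmhash_entries=4096" on
 * the kernel command line (rounded up to a power of two below);
 * otherwise tcp_net_metrics_init() picks a default based on RAM.
 */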
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

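/* Per-netns setup.  The bucket array can be large, so if a contiguous
 * kzalloc() fails (__GFP_NOWARN, since the failure is handled) we fall
 * back to vzalloc().
 */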
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops,
					    ARRAY_SIZE(tcp_metrics_nl_ops));
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}