#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

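/* Refresh a cache entry from its route: record which metrics the dst has
 * locked, copy the current dst values (converting RTT and RTTVAR from the
 * dst's msec resolution to usec), and clear the timestamp state.  Fast Open
 * state is wiped only when fastopen_clear is set.
 */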
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

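/* Re-sync an entry from the route if it has not been refreshed for an hour. */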
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

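/* Create a cache entry under tcp_metrics_lock.  If the lookup reported an
 * over-long chain (TCP_METRICS_RECLAIM_PTR), recycle the oldest entry in
 * the bucket instead of allocating a new one.
 */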
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

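/* Encode the result of a failed lookup: NULL means "not found", while
 * TCP_METRICS_RECLAIM_PTR means "not found, and the chain is long enough
 * that a new entry should reclaim the oldest one".
 */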
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

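/* RCU lookup of the (saddr, daddr, net) entry in the given hash bucket. */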
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

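/* Look up the cache entry matching a request socket. */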
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

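/* Look up the cache entry matching a timewait socket. */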
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
		hash = ipv4_addr_hash(tw->tw_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
			hash = ipv4_addr_hash(tw->tw_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	return tm;
}

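/* Look up the entry for a full socket, optionally creating it if absent.
 * Callers are expected to hold rcu_read_lock().
 */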
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated RTT is larger than the stored one,
	 * store the new one. Otherwise, use EWMA. Remember, RTT
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is not reliable,
		 * and ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (!dst)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * SYN cookies) or the cache, force a conservative 3 second timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is correct too. The algorithm is adaptive and,
	 * provided we follow the specs, it NEVER underestimates RTT. BUT!
	 * If the peer tries clever tricks, such as sending "quick acks"
	 * for long enough to drive the RTT down to a low value, and then
	 * abruptly stops doing so and starts to delay ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_MSEC;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious ones. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

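/* With paws_check, return false only when a cached timestamp proves that
 * this request would violate PAWS.  Without it, return true only when the
 * peer has both a cached RTT and a cached timestamp.
 */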
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
		     !timestamps))
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

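/* Seed the socket's ts_recent from the cache, provided the cached timewait
 * timestamp is recent enough (within TCP_PAWS_MSL).
 */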
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it for at least the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

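/* As tcp_remember_stamp(), but for a timewait socket. */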
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts	   = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

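/* A seqlock lets readers take a consistent snapshot of the Fast Open state
 * without having to grab tcp_metrics_lock.
 */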
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

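/* Update the cached Fast Open state after a handshake attempt: remember
 * the negotiated MSS and cookie, and count consecutive SYN losses.
 */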
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

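/* The cache is exposed over generic netlink so that userspace (for
 * example iproute2's "ip tcp_metrics", assuming a version that carries
 * the command) can get, dump and delete entries.
 */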
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

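/* Emit one cache entry as a single message of a netlink dump. */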
static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

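/* Walk the whole hash table for a netlink dump, resuming from the
 * (row, col) position saved in cb->args[].
 */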
 | 903 | static int tcp_metrics_nl_dump(struct sk_buff *skb, | 
 | 904 | 			       struct netlink_callback *cb) | 
 | 905 | { | 
 | 906 | 	struct net *net = sock_net(skb->sk); | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 907 | 	unsigned int max_rows = 1U << tcp_metrics_hash_log; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 908 | 	unsigned int row, s_row = cb->args[0]; | 
 | 909 | 	int s_col = cb->args[1], col = s_col; | 
 | 910 |  | 
 | 911 | 	for (row = s_row; row < max_rows; row++, s_col = 0) { | 
 | 912 | 		struct tcp_metrics_block *tm; | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 913 | 		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 914 |  | 
 | 915 | 		rcu_read_lock(); | 
 | 916 | 		for (col = 0, tm = rcu_dereference(hb->chain); tm; | 
 | 917 | 		     tm = rcu_dereference(tm->tcpm_next), col++) { | 
| Eric W. Biederman | 849e8a0 | 2015-03-13 00:05:52 -0500 | [diff] [blame] | 918 | 			if (!net_eq(tm_net(tm), net)) | 
 | 919 | 				continue; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 920 | 			if (col < s_col) | 
 | 921 | 				continue; | 
 | 922 | 			if (tcp_metrics_dump_info(skb, cb, tm) < 0) { | 
 | 923 | 				rcu_read_unlock(); | 
 | 924 | 				goto done; | 
 | 925 | 			} | 
 | 926 | 		} | 
 | 927 | 		rcu_read_unlock(); | 
 | 928 | 	} | 
 | 929 |  | 
 | 930 | done: | 
 | 931 | 	cb->args[0] = row; | 
 | 932 | 	cb->args[1] = col; | 
 | 933 | 	return skb->len; | 
 | 934 | } | 
 | 935 |  | 
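/* Pull a peer address out of the request: the IPv4 attribute wins when
 * both are present, an IPv6 attribute must carry a full in6_addr, and
 * *hash (when requested) gets the raw address hash before it is folded
 * into a table index.  Returns 1 for a missing-but-optional address,
 * -EAFNOSUPPORT when a required address is absent.
 */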
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 936 | static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr, | 
 | 937 | 			   unsigned int *hash, int optional, int v4, int v6) | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 938 | { | 
 | 939 | 	struct nlattr *a; | 
 | 940 |  | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 941 | 	a = info->attrs[v4]; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 942 | 	if (a) { | 
| David Ahern | 3abef28 | 2015-08-27 16:07:00 -0700 | [diff] [blame] | 943 | 		inetpeer_set_addr_v4(addr, nla_get_in_addr(a)); | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 944 | 		if (hash) | 
| David Ahern | 3abef28 | 2015-08-27 16:07:00 -0700 | [diff] [blame] | 945 | 			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr)); | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 946 | 		return 0; | 
 | 947 | 	} | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 948 | 	a = info->attrs[v6]; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 949 | 	if (a) { | 
| David Ahern | 3abef28 | 2015-08-27 16:07:00 -0700 | [diff] [blame] | 950 | 		struct in6_addr in6; | 
 | 951 |  | 
| Julian Anastasov | 2c42a3f | 2012-10-30 12:03:09 +0000 | [diff] [blame] | 952 | 		if (nla_len(a) != sizeof(struct in6_addr)) | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 953 | 			return -EINVAL; | 
| David Ahern | 3abef28 | 2015-08-27 16:07:00 -0700 | [diff] [blame] | 954 | 		in6 = nla_get_in6_addr(a); | 
 | 955 | 		inetpeer_set_addr_v6(addr, &in6); | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 956 | 		if (hash) | 
| David Ahern | 3abef28 | 2015-08-27 16:07:00 -0700 | [diff] [blame] | 957 | 			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr)); | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 958 | 		return 0; | 
 | 959 | 	} | 
 | 960 | 	return optional ? 1 : -EAFNOSUPPORT; | 
 | 961 | } | 
 | 962 |  | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 963 | static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr, | 
 | 964 | 			 unsigned int *hash, int optional) | 
 | 965 | { | 
 | 966 | 	return __parse_nl_addr(info, addr, hash, optional, | 
 | 967 | 			       TCP_METRICS_ATTR_ADDR_IPV4, | 
 | 968 | 			       TCP_METRICS_ATTR_ADDR_IPV6); | 
 | 969 | } | 
 | 970 |  | 
 | 971 | static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr) | 
 | 972 | { | 
 | 973 | 	return __parse_nl_addr(info, addr, NULL, 0, | 
 | 974 | 			       TCP_METRICS_ATTR_SADDR_IPV4, | 
 | 975 | 			       TCP_METRICS_ATTR_SADDR_IPV6); | 
 | 976 | } | 
 | 977 |  | 
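/* GET with an explicit destination (and optionally a source) address:
 * look up the single matching entry in this namespace and reply with
 * its metrics in a freshly allocated message; -ESRCH if none matches.
 */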
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 978 | static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info) | 
 | 979 | { | 
 | 980 | 	struct tcp_metrics_block *tm; | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 981 | 	struct inetpeer_addr saddr, daddr; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 982 | 	unsigned int hash; | 
 | 983 | 	struct sk_buff *msg; | 
 | 984 | 	struct net *net = genl_info_net(info); | 
 | 985 | 	void *reply; | 
 | 986 | 	int ret; | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 987 | 	bool src = true; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 988 |  | 
| Christoph Paasch | 324fd55 | 2014-01-08 16:05:55 +0100 | [diff] [blame] | 989 | 	ret = parse_nl_addr(info, &daddr, &hash, 0); | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 990 | 	if (ret < 0) | 
 | 991 | 		return ret; | 
 | 992 |  | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 993 | 	ret = parse_nl_saddr(info, &saddr); | 
 | 994 | 	if (ret < 0) | 
 | 995 | 		src = false; | 
 | 996 |  | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 997 | 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 
 | 998 | 	if (!msg) | 
 | 999 | 		return -ENOMEM; | 
 | 1000 |  | 
 | 1001 | 	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0, | 
 | 1002 | 				  info->genlhdr->cmd); | 
 | 1003 | 	if (!reply) | 
 | 1004 | 		goto nla_put_failure; | 
 | 1005 |  | 
| Eric W. Biederman | 3e5da62 | 2015-03-13 00:05:24 -0500 | [diff] [blame] | 1006 | 	hash ^= net_hash_mix(net); | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1007 | 	hash = hash_32(hash, tcp_metrics_hash_log); | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1008 | 	ret = -ESRCH; | 
 | 1009 | 	rcu_read_lock(); | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1010 | 	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1011 | 	     tm = rcu_dereference(tm->tcpm_next)) { | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 1012 | 		if (addr_same(&tm->tcpm_daddr, &daddr) && | 
| Eric W. Biederman | 849e8a0 | 2015-03-13 00:05:52 -0500 | [diff] [blame] | 1013 | 		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) && | 
 | 1014 | 		    net_eq(tm_net(tm), net)) { | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1015 | 			ret = tcp_metrics_fill_info(msg, tm); | 
 | 1016 | 			break; | 
 | 1017 | 		} | 
 | 1018 | 	} | 
 | 1019 | 	rcu_read_unlock(); | 
 | 1020 | 	if (ret < 0) | 
 | 1021 | 		goto out_free; | 
 | 1022 |  | 
 | 1023 | 	genlmsg_end(msg, reply); | 
 | 1024 | 	return genlmsg_reply(msg, info); | 
 | 1025 |  | 
 | 1026 | nla_put_failure: | 
 | 1027 | 	ret = -EMSGSIZE; | 
 | 1028 |  | 
 | 1029 | out_free: | 
 | 1030 | 	nlmsg_free(msg); | 
 | 1031 | 	return ret; | 
 | 1032 | } | 
 | 1033 |  | 
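/* Unlink and free every entry owned by @net, one bucket at a time,
 * taking tcp_metrics_lock per row so writers on other buckets are not
 * blocked for the whole sweep.
 */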
| Eric W. Biederman | 8a4bff7 | 2015-03-13 00:06:43 -0500 | [diff] [blame] | 1034 | static void tcp_metrics_flush_all(struct net *net) | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1035 | { | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1036 | 	unsigned int max_rows = 1U << tcp_metrics_hash_log; | 
 | 1037 | 	struct tcpm_hash_bucket *hb = tcp_metrics_hash; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1038 | 	struct tcp_metrics_block *tm; | 
 | 1039 | 	unsigned int row; | 
 | 1040 |  | 
 | 1041 | 	for (row = 0; row < max_rows; row++, hb++) { | 
| Eric W. Biederman | 04f721c | 2015-03-13 00:07:10 -0500 | [diff] [blame] | 1042 | 		struct tcp_metrics_block __rcu **pp; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1043 | 		spin_lock_bh(&tcp_metrics_lock); | 
| Eric W. Biederman | 04f721c | 2015-03-13 00:07:10 -0500 | [diff] [blame] | 1044 | 		pp = &hb->chain; | 
| Eric Dumazet | 9f1ab18 | 2015-03-16 07:14:34 -0700 | [diff] [blame] | 1045 | 		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { | 
| Eric W. Biederman | 04f721c | 2015-03-13 00:07:10 -0500 | [diff] [blame] | 1046 | 			if (net_eq(tm_net(tm), net)) { | 
 | 1047 | 				*pp = tm->tcpm_next; | 
 | 1048 | 				kfree_rcu(tm, rcu_head); | 
 | 1049 | 			} else { | 
 | 1050 | 				pp = &tm->tcpm_next; | 
 | 1051 | 			} | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1052 | 		} | 
| Eric W. Biederman | 04f721c | 2015-03-13 00:07:10 -0500 | [diff] [blame] | 1053 | 		spin_unlock_bh(&tcp_metrics_lock); | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1054 | 	} | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1055 | } | 
 | 1056 |  | 
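/* DEL: with no destination address the whole namespace is flushed;
 * otherwise unlink every entry matching the destination (and source,
 * if one was given) and report -ESRCH when nothing matched.
 */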
 | 1057 | static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info) | 
 | 1058 | { | 
 | 1059 | 	struct tcpm_hash_bucket *hb; | 
| Christoph Paasch | 00ca9c5 | 2014-01-21 13:30:26 +0100 | [diff] [blame] | 1060 | 	struct tcp_metrics_block *tm; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1061 | 	struct tcp_metrics_block __rcu **pp; | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 1062 | 	struct inetpeer_addr saddr, daddr; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1063 | 	unsigned int hash; | 
 | 1064 | 	struct net *net = genl_info_net(info); | 
 | 1065 | 	int ret; | 
| Christoph Paasch | 00ca9c5 | 2014-01-21 13:30:26 +0100 | [diff] [blame] | 1066 | 	bool src = true, found = false; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1067 |  | 
| Christoph Paasch | 324fd55 | 2014-01-08 16:05:55 +0100 | [diff] [blame] | 1068 | 	ret = parse_nl_addr(info, &daddr, &hash, 1); | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1069 | 	if (ret < 0) | 
 | 1070 | 		return ret; | 
| Eric W. Biederman | 8a4bff7 | 2015-03-13 00:06:43 -0500 | [diff] [blame] | 1071 | 	if (ret > 0) { | 
 | 1072 | 		tcp_metrics_flush_all(net); | 
 | 1073 | 		return 0; | 
 | 1074 | 	} | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 1075 | 	ret = parse_nl_saddr(info, &saddr); | 
 | 1076 | 	if (ret < 0) | 
 | 1077 | 		src = false; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1078 |  | 
| Eric W. Biederman | 3e5da62 | 2015-03-13 00:05:24 -0500 | [diff] [blame] | 1079 | 	hash ^= net_hash_mix(net); | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1080 | 	hash = hash_32(hash, tcp_metrics_hash_log); | 
 | 1081 | 	hb = tcp_metrics_hash + hash; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1082 | 	pp = &hb->chain; | 
 | 1083 | 	spin_lock_bh(&tcp_metrics_lock); | 
| Eric Dumazet | 9f1ab18 | 2015-03-16 07:14:34 -0700 | [diff] [blame] | 1084 | 	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { | 
| Christoph Paasch | 3e7013d | 2014-01-08 16:05:59 +0100 | [diff] [blame] | 1085 | 		if (addr_same(&tm->tcpm_daddr, &daddr) && | 
| Eric W. Biederman | 849e8a0 | 2015-03-13 00:05:52 -0500 | [diff] [blame] | 1086 | 		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) && | 
 | 1087 | 		    net_eq(tm_net(tm), net)) { | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1088 | 			*pp = tm->tcpm_next; | 
| Christoph Paasch | 00ca9c5 | 2014-01-21 13:30:26 +0100 | [diff] [blame] | 1089 | 			kfree_rcu(tm, rcu_head); | 
 | 1090 | 			found = true; | 
| Christoph Paasch | bbf852b | 2014-01-08 16:05:58 +0100 | [diff] [blame] | 1091 | 		} else { | 
 | 1092 | 			pp = &tm->tcpm_next; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1093 | 		} | 
 | 1094 | 	} | 
 | 1095 | 	spin_unlock_bh(&tcp_metrics_lock); | 
| Christoph Paasch | 00ca9c5 | 2014-01-21 13:30:26 +0100 | [diff] [blame] | 1096 | 	if (!found) | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1097 | 		return -ESRCH; | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1098 | 	return 0; | 
 | 1099 | } | 
 | 1100 |  | 
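/* Generic netlink ops: GET serves both single lookups (doit) and full
 * table dumps (dumpit); DEL is restricted to CAP_NET_ADMIN via
 * GENL_ADMIN_PERM.  Userspace front ends such as iproute2's
 * "ip tcp_metrics" speak this interface.
 */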
| Johannes Berg | 4534de8 | 2013-11-14 17:14:46 +0100 | [diff] [blame] | 1101 | static const struct genl_ops tcp_metrics_nl_ops[] = { | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1102 | 	{ | 
 | 1103 | 		.cmd = TCP_METRICS_CMD_GET, | 
 | 1104 | 		.doit = tcp_metrics_nl_cmd_get, | 
 | 1105 | 		.dumpit = tcp_metrics_nl_dump, | 
 | 1106 | 		.policy = tcp_metrics_nl_policy, | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1107 | 	}, | 
 | 1108 | 	{ | 
 | 1109 | 		.cmd = TCP_METRICS_CMD_DEL, | 
 | 1110 | 		.doit = tcp_metrics_nl_cmd_del, | 
 | 1111 | 		.policy = tcp_metrics_nl_policy, | 
 | 1112 | 		.flags = GENL_ADMIN_PERM, | 
 | 1113 | 	}, | 
 | 1114 | }; | 
 | 1115 |  | 
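/* "tcpmhash_entries=" on the kernel command line overrides the
 * RAM-based default computed below, e.g. booting with
 * tcpmhash_entries=4096 requests a 4096-slot table (the slot count is
 * rounded up to a power of two by order_base_2()).
 */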
| Eric Dumazet | 5815d5e | 2012-07-19 23:02:34 +0000 | [diff] [blame] | 1116 | static unsigned int tcpmhash_entries; | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1117 | static int __init set_tcpmhash_entries(char *str) | 
 | 1118 | { | 
 | 1119 | 	int ret;	/* kstrtouint() returns an int error code */ | 
 | 1120 |  | 
 | 1121 | 	if (!str) | 
 | 1122 | 		return 0; | 
 | 1123 |  | 
| Eric Dumazet | 5815d5e | 2012-07-19 23:02:34 +0000 | [diff] [blame] | 1124 | 	ret = kstrtouint(str, 0, &tcpmhash_entries); | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1125 | 	if (ret) | 
 | 1126 | 		return 0; | 
 | 1127 |  | 
 | 1128 | 	return 1; | 
 | 1129 | } | 
 | 1130 | __setup("tcpmhash_entries=", set_tcpmhash_entries); | 
 | 1131 |  | 
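/* The cache is global: only the initial namespace allocates the hash
 * table, sized from available RAM unless tcpmhash_entries= said
 * otherwise, falling back to vzalloc() when the kzalloc() fails.
 */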
 | 1132 | static int __net_init tcp_net_metrics_init(struct net *net) | 
 | 1133 | { | 
| Eric Dumazet | 5815d5e | 2012-07-19 23:02:34 +0000 | [diff] [blame] | 1134 | 	size_t size; | 
 | 1135 | 	unsigned int slots; | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1136 |  | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1137 | 	if (!net_eq(net, &init_net)) | 
 | 1138 | 		return 0; | 
 | 1139 |  | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1140 | 	slots = tcpmhash_entries; | 
 | 1141 | 	if (!slots) { | 
 | 1142 | 		if (totalram_pages >= 128 * 1024) | 
 | 1143 | 			slots = 16 * 1024; | 
 | 1144 | 		else | 
 | 1145 | 			slots = 8 * 1024; | 
 | 1146 | 	} | 
 | 1147 |  | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1148 | 	tcp_metrics_hash_log = order_base_2(slots); | 
 | 1149 | 	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log; | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1150 |  | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1151 | 	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); | 
 | 1152 | 	if (!tcp_metrics_hash) | 
 | 1153 | 		tcp_metrics_hash = vzalloc(size); | 
| Eric Dumazet | 976a702 | 2012-11-16 05:31:53 +0000 | [diff] [blame] | 1154 |  | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1155 | 	if (!tcp_metrics_hash) | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1156 | 		return -ENOMEM; | 
 | 1157 |  | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1158 | 	return 0; | 
 | 1159 | } | 
 | 1160 |  | 
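/* A namespace is going away: drop every entry it still owns. */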
 | 1161 | static void __net_exit tcp_net_metrics_exit(struct net *net) | 
 | 1162 | { | 
| Eric W. Biederman | 098a697 | 2015-03-13 00:07:44 -0500 | [diff] [blame] | 1163 | 	tcp_metrics_flush_all(net); | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1164 | } | 
 | 1165 |  | 
 | 1166 | static __net_initdata struct pernet_operations tcp_net_metrics_ops = { | 
 | 1167 | 	.init	=	tcp_net_metrics_init, | 
 | 1168 | 	.exit	=	tcp_net_metrics_exit, | 
 | 1169 | }; | 
 | 1170 |  | 
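/* Boot-time registration; neither the pernet subsystem nor the generic
 * netlink family may fail this early, hence the panics.
 */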
 | 1171 | void __init tcp_metrics_init(void) | 
 | 1172 | { | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1173 | 	int ret; | 
 | 1174 |  | 
 | 1175 | 	ret = register_pernet_subsys(&tcp_net_metrics_ops); | 
 | 1176 | 	if (ret < 0) | 
| Eric W. Biederman | 6493517 | 2015-03-13 00:04:51 -0500 | [diff] [blame] | 1177 | 		panic("Could not allocate the tcp_metrics hash table\n"); | 
 | 1178 |  | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1179 | 	ret = genl_register_family_with_ops(&tcp_metrics_nl_family, | 
| Johannes Berg | c53ed74 | 2013-11-19 15:19:31 +0100 | [diff] [blame] | 1180 | 					    tcp_metrics_nl_ops); | 
| Julian Anastasov | d23ff70 | 2012-09-04 11:03:15 +0000 | [diff] [blame] | 1181 | 	if (ret < 0) | 
| Eric W. Biederman | 6493517 | 2015-03-13 00:04:51 -0500 | [diff] [blame] | 1182 | 		panic("Could not register tcp_metrics generic netlink\n"); | 
| David S. Miller | 51c5d0c | 2012-07-10 00:49:14 -0700 | [diff] [blame] | 1183 | } |