/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>

static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		tb->num_owners = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

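/*
 * Bind @sk to the bucket @tb representing local port @snum: record the port
 * in the socket, link the socket onto the bucket's owner list and make
 * icsk_bind_hash point back at the bucket.  As with the destroy helper
 * above, callers are expected to hold the bind hash bucket lock.
 */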
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tb->num_owners++;
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

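/*
 * Make a child socket (typically one created off a listener) share the
 * parent's bind bucket for @port.  When TPROXY has redirected the flow to a
 * different port, the matching bucket is looked up or created instead; see
 * the note inside.  Returns 0 on success, or -ENOMEM if a new bucket could
 * not be allocated.
 */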
int __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

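/*
 * Score a listening socket against an incoming (daddr, hnum, dif) tuple:
 * -1 means "does not match"; otherwise the base score is 2 for an AF_INET
 * socket (1 for an IPv6 socket that can also accept this IPv4 packet), and
 * an exact match on the bound local address or on the bound device each
 * adds 4.  The listener lookup below keeps the highest-scoring socket.
 */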
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
			!ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;
		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
	}
	return score;
}

/*
 * Don't inline this cruft.  There are some nice properties to exploit here.
 * The BSD API does not allow a listening sock to specify the remote port nor
 * the remote address for the connection.  So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	u32 phash = 0;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			matches++;
			if (reciprocal_scale(phash, matches) == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart the lookup.  We probably met an item
	 * that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
						dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share a common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

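/*
 * skb destructor used by the early-demux path: the reference taken when
 * skb->sk was attached is dropped through sock_gen_put(), so it works for
 * full sockets, timewait and request sockets alike.
 */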
void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

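/*
 * Lockless lookup of an established (non-listening) socket by its 4-tuple.
 * Runs under RCU on a nulls list: a chain can be walked while sockets move
 * between chains, which is why the nulls value is checked at the end and
 * the match is re-validated after taking a reference.
 */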
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart the lookup.  We probably met an item
	 * that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;
	int twrefcnt = 0;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now.  Otherwise we will see a socket
	 * with a funny identity in the hash table.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		twrefcnt = inet_twsk_unhash(tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	if (twrefcnt)
		inet_twsk_put(tw);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw);

		inet_twsk_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

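/*
 * Per-destination starting offset for the ephemeral port search: derived
 * from the connection's addresses and remote port via a keyed hash, so that
 * different destinations walk the local port range in different orders.
 */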
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

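/*
 * Insert a non-listening socket into the established hash.  If a TIME_WAIT
 * socket @tw occupying the same slot is passed in, it is unhashed under the
 * same bucket lock; the value returned is the number of timewait references
 * the caller still has to drop (as reported by inet_twsk_unhash()).
 */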
int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	int twrefcnt = 0;

	WARN_ON(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	__sk_nulls_add_node_rcu(sk, list);
	if (tw) {
		WARN_ON(sk->sk_hash != tw->tw_hash);
		twrefcnt = inet_twsk_unhash(tw);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

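/*
 * Hash a socket according to its state: TCP_LISTEN sockets go into the
 * listening hash under the per-bucket spinlock, everything else is handed
 * to __inet_hash_nolisten() above.
 */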
int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;

	if (sk->sk_state != TCP_LISTEN)
		return __inet_hash_nolisten(sk, tw);

	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
	return 0;
}
EXPORT_SYMBOL(__inet_hash);

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk, NULL);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

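/*
 * Remove a socket from whichever hash table it currently sits in (listening
 * or established), updating the protocol's inuse counter when something was
 * actually unlinked.
 */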
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

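/*
 * Pick a source port for a connecting socket and hash it.  Roughly: when no
 * port is bound yet, the local port range is walked starting at a
 * per-destination offset (see inet_sk_port_offset()), reserved ports are
 * skipped, and a port is only taken once @check_established confirms the
 * full 4-tuple is unique -- possibly by recycling a matching TIME_WAIT
 * socket.  A socket that already owns its port alone can be hashed
 * directly; otherwise the established check is the final arbiter.
 */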
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);
	int twrefcnt = 1;

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_local_reserved_port(net, port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			twrefcnt += __inet_hash_nolisten(sk, tw);
		}
		if (tw)
			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw);
			while (twrefcnt) {
				twrefcnt--;
				inet_twsk_put(tw);
			}
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		__inet_hash_nolisten(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

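/*
 * Initialize the listening hash buckets; each chain gets a distinct nulls
 * marker so that RCU lookups can detect when they raced with a socket that
 * moved to another chain.
 */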
void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);

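/*
 * Size and allocate the array of spinlocks protecting the established hash:
 * roughly two cache lines' worth of locks per possible CPU, rounded up to a
 * power of two and capped at the number of hash buckets, with a vmalloc()
 * fallback when kmalloc_array() fails.  When spinlock_t is zero-sized
 * (!SMP, no lock debugging) no array is allocated and the mask stays 0.
 */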
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int i, nblocks = 1;

	if (sizeof(spinlock_t) != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max_t(unsigned int,
				2 * L1_CACHE_BYTES / sizeof(spinlock_t),
				1);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
						      GFP_KERNEL | __GFP_NOWARN);
		if (!hashinfo->ehash_locks)
			hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));

		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);