/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

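/*
 * inet_get_local_port_range - read the ephemeral port range for @net.
 * The seqlock retry loop guarantees a consistent (low, high) pair even
 * if ip_local_port_range is being rewritten concurrently.
 */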
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

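/*
 * inet_csk_bind_conflict - check whether @sk may share bind bucket @tb.
 * Walks the sockets already bound to this port and applies the
 * SO_REUSEADDR/SO_REUSEPORT rules; with @relax false, even two
 * compatible SO_REUSEADDR sockets count as a conflict, which is the
 * stricter test used while probing candidate ports.
 */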
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    rcu_access_pointer(sk->sk_reuseport_cb) ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int ret = 1, attempts = 5, port = snum;
	int smallest_size = -1, smallest_port;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	int i, low, high, attempt_half;
	struct inet_bind_bucket *tb;
	kuid_t uid = sock_i_uid(sk);
	u32 remaining, offset;

	if (port) {
have_port:
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		if (inet_is_local_reserved_port(net, snum) &&
		    !sysctl_reserved_port_bind) {
			ret = 1;
			goto fail_unlock;
		}

		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == port)
				goto tb_found;

		goto tb_not_found;
	}
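	/*
	 * No port given: start from a random offset in the (possibly
	 * halved) local range and scan ports whose parity differs from
	 * @low first, falling back to the other parity only when that
	 * scan fails; __inet_hash_connect() prefers @low parity, so this
	 * keeps bind() from polluting connect()'s ports.
	 */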
again:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_get_local_port_range(net, &low, &high);
	high++;	/* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = prandom_u32() % remaining;
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;
	smallest_size = -1;
	smallest_port = low; /* avoid compiler warning */

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (((tb->fastreuse > 0 && reuse) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport &&
				      !rcu_access_pointer(sk->sk_reuseport_cb) &&
				      uid_eq(tb->fastuid, uid))) &&
				    (tb->num_owners < smallest_size || smallest_size == -1)) {
					smallest_size = tb->num_owners;
					smallest_port = port;
				}
				if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false))
					goto tb_found;
				goto next_port;
			}
		goto tb_not_found;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	if (smallest_size != -1) {
		port = smallest_port;
		goto have_port;
	}
	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}
	return ret;

tb_not_found:
	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
				     net, head, port);
	if (!tb)
		goto fail_unlock;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 && reuse) ||
		     (tb->fastreuseport > 0 &&
		      !rcu_access_pointer(sk->sk_reuseport_cb) &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1)
			goto success;
		if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
			if ((reuse ||
			     (tb->fastreuseport > 0 &&
			      sk->sk_reuseport &&
			      !rcu_access_pointer(sk->sk_reuseport_cb) &&
			      uid_eq(tb->fastuid, uid))) &&
			    smallest_size != -1 && --attempts >= 0) {
				spin_unlock_bh(&head->lock);
				goto again;
			}
			goto fail_unlock;
		}
		if (!reuse)
			tb->fastreuse = 0;
		if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))
			tb->fastreuseport = 0;
	} else {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else {
			tb->fastreuseport = 0;
		}
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}
out:
	release_sock(sk);
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
		    (unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
		    (unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

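/*
 * inet_csk_route_req - build the route used to send the SYN-ACK for @req.
 * The request's IP options are dereferenced under rcu_read_lock(), which
 * is held across the route lookup because the options are used again for
 * the strict source route check.
 */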
struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

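/*
 * inet_rtx_syn_ack - retransmit the SYN-ACK for @req via the protocol's
 * rtx_syn_ack() hook and count the retransmission on success.
 */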
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
			       struct request_sock *req)
{
	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
	bool found = false;

	if (sk_hashed(req_to_sk(req))) {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

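/*
 * reqsk_timer_handler - timer for requests that have not yet completed
 * the handshake. syn_ack_recalc() decides between retransmitting the
 * SYN-ACK (rearming with exponential backoff) and expiring the request;
 * when old entries start to clog the queue, the threshold is lowered so
 * they are pruned more aggressively.
 */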
static void reqsk_timer_handler(unsigned long data)
{
	struct request_sock *req = (struct request_sock *)data;
	struct sock *sk_listener = req->rsk_listener;
	struct net *net = sock_net(sk_listener);
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (sk_state_load(sk_listener) != TCP_LISTEN)
		goto drop;

	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer(&req->rsk_timer, jiffies + timeo);
		return;
	}
drop:
	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
}

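/*
 * reqsk_queue_hash_req - arm the request's retransmit timer and insert
 * it into the ehash table so incoming packets can find it. The refcount
 * is published only after smp_wmb() so lookups never observe a partly
 * initialized entry.
 */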
static void reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
			   (unsigned long)req);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	inet_ehash_insert(req_to_sk(req), NULL);
	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	atomic_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
		newsk->sk_write_space = sk_stream_write_space;

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	local_bh_disable();
	percpu_counter_dec(sk->sk_prot->orphan_count);
	local_bh_enable();
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

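/*
 * inet_csk_listen_start - allocate the accept queue and switch @sk to
 * TCP_LISTEN, rolling back to TCP_CLOSE if the local port can no longer
 * be claimed or hashing the socket fails.
 */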
int inet_csk_listen_start(struct sock *sk, int backlog)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = -EADDRINUSE;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	sk_state_store(sk, TCP_LISTEN);
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	sk->sk_state = TCP_CLOSE;
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

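/*
 * inet_child_forget - orphan and destroy a child socket that will never
 * be accepted, e.g. because its listener is shutting down.
 */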
static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		tcp_sk(child)->fastopen_rsk = NULL;
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_head = req;
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

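/*
 * inet_csk_complete_hashdance - final step of the handshake: if this
 * caller owns @req, unlink it from the ehash table and queue the child
 * on the listener's accept queue; otherwise undo, since another child
 * took ownership of the request.
 */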
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(sk, req);
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		if (inet_csk_reqsk_queue_add(sk, req, child))
			return child;
	}
	/* Too bad, another child took ownership of the request, undo. */
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		inet_child_forget(sk, req, child);
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

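/*
 * inet_csk_rebuild_route - recompute a connected socket's cached route
 * from its flow information, honouring any source routing option.
 */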
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

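/*
 * inet_csk_update_pmtu - propagate a new path MTU to the socket's cached
 * route, rebuilding the route if the cache is stale or was invalidated
 * by the update.
 */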
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);