/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

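/*
 * Read the current ephemeral port range under the seqlock, retrying if a
 * writer updated the range meanwhile, so callers always see a consistent
 * [low, high] pair.
 */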
void inet_get_local_port_range(int *low, int *high)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

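/*
 * Walk the bind bucket's owner list and return non-zero if another socket
 * already bound to this port conflicts with 'sk': same (or unrestricted)
 * device binding and, unless both sockets allow address reuse, an
 * overlapping local address.
 */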
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;

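	/*
	 * Note: bottom halves stay disabled for the whole search because
	 * the bind hash bucket locks taken below may also be taken from
	 * softirq context (e.g. when a child socket inherits the
	 * listener's port), so taking them with BHs enabled could
	 * deadlock.
	 */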
	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (tb->fastreuse > 0 &&
					    sk->sk_reuse &&
					    sk->sk_state != TCP_LISTEN &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
							spin_unlock(&head->lock);
							snum = smallest_rover;
							goto have_snum;
						}
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}
				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
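	/*
	 * Drop the socket lock while sleeping so that incoming handshake
	 * processing can complete and fill the accept queue; re-take it
	 * before re-checking the exit conditions.
	 */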
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * is wasteful; we may wish to use just one timer maintaining a list
 * of expiry jiffies to optimize this.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

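/*
 * Build and return a route for replying to a connection request: the
 * flow is keyed on the request's remote/local addresses and ports, and
 * honours any IP source-route option recorded on the request.
 */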
struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

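/*
 * Like inet_csk_route_req(), but for a freshly created child socket:
 * the flow is stored in the child's inet cork so later transmits can
 * reuse it.
 */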
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt = ireq->opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

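/*
 * Hash a pending request into the listener's SYN table: mix the remote
 * address and port with a per-listener random salt, then mask down to
 * the (power-of-two) table size.
 */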
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

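/*
 * Look up a pending connection request by its 4-tuple.  On a hit, *prevp
 * is set to the predecessor's dl_next pointer so the caller can unlink
 * the request from its hash chain.
 */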
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->retrans >= thresh &&
		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK;
	 * start resending at the end of the deferring period to give
	 * data or an ACK a last chance to create an established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->retrans >= rskq_defer_accept - 1;
}

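/*
 * Timer-driven pruning of the SYN table.  Each run scans a budgeted
 * slice of the hash buckets (sized so the whole table is covered
 * roughly twice per 'timeout' period), retransmitting SYN-ACKs for
 * requests that still deserve it and dropping those that have
 * exhausted their retries.
 */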
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) within the first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				if (req->rsk_ops->syn_ack_timeout)
					req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

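/*
 * Clone a listening socket into a child in TCP_SYN_RECV state, copying
 * the port numbers from the connection request and resetting
 * per-connection state that must not be inherited from the listener.
 */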
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

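/*
 * Move a socket into the listening state: allocate the accept queue,
 * set TCP_LISTEN, then validate the local port via get_port().  On a
 * port conflict the state is rolled back and -EADDRINUSE is returned.
 */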
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but that
	 * is a bad justification for our negligence 8)
	 * To be honest, we are not able to implement either
	 * of the variants now. --ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

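/*
 * Fill a sockaddr_in with the peer's address and port, e.g. for
 * getpeername() on a connected socket.
 */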
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

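/*
 * Compat sockopt helpers: dispatch to the address family's compat
 * handler when one is provided, otherwise fall back to the native
 * getsockopt/setsockopt operations.
 */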
#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif