/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * The minimum value of it is 128. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems.
 * This value is adjusted to 128 for low-memory machines,
 * and it will increase in proportion to the memory of the machine.
 * Note: don't forget that somaxconn may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);

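/* Allocate and publish the per-listener SYN_RECV hash table. The caller
 * (typically inet_csk_listen_start(), when the socket enters TCP_LISTEN)
 * passes the requested backlog as nr_table_entries; returns 0 on success
 * or -ENOMEM.
 */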
int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt = NULL;

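	/* Clamp the requested table size to [8, sysctl_max_syn_backlog],
	 * then round up to a power of two; the "+ 1" makes the table
	 * strictly larger than the clamped backlog, so e.g. a backlog of
	 * 128 yields a 256-entry table.
	 */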
	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);

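	/* Try a physically contiguous allocation first while it is cheap
	 * (no retries, no warning on failure), and fall back to vmalloc()ed
	 * memory for large tables or under memory pressure.
	 */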
	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		lopt = kzalloc(lopt_size, GFP_KERNEL |
					  __GFP_NOWARN |
					  __GFP_NORETRY);
	if (!lopt)
		lopt = vzalloc(lopt_size);
	if (!lopt)
		return -ENOMEM;

	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	spin_lock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;
	lopt->max_qlen_log = ilog2(nr_table_entries);

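	/* Publish the table under syn_wait_lock, so readers dereferencing
	 * queue->listen_opt under the lock see a fully initialized table.
	 */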
	spin_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	spin_unlock_bh(&queue->syn_wait_lock);

	return 0;
}

void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* This is an error recovery path only, no locking needed */
	kvfree(queue->listen_opt);
}

static inline struct listen_sock *reqsk_queue_yank_listen_sk(
	struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	spin_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	spin_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

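/* Tear down the SYN_RECV table of a closing listener: detach the table,
 * cancel the timer of every still-pending request, and drop the references
 * that the table and the timers hold.
 */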
void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* Make all of the listen_opt local to us */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);

	if (listen_sock_qlen(lopt) != 0) {
		unsigned int i;

		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;

			spin_lock_bh(&queue->syn_wait_lock);
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				/* Because of the following del_timer_sync(),
				 * we must release the spinlock here
				 * or risk a deadlock.
				 */
				spin_unlock_bh(&queue->syn_wait_lock);
				atomic_inc(&lopt->qlen_dec);
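				/* A pending timer holds its own reference;
				 * drop it if we deactivated the timer, then
				 * drop the hash table's reference.
				 */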
				if (del_timer_sync(&req->rsk_timer))
					reqsk_put(req);
				reqsk_put(req);
				spin_lock_bh(&queue->syn_wait_lock);
			}
			spin_unlock_bh(&queue->syn_wait_lock);
		}
	}

	if (WARN_ON(listen_sock_qlen(lopt) != 0))
		pr_err("qlen %u\n", listen_sock_qlen(lopt));
	kvfree(lopt);
}

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after the 3WHS has been either completed or aborted
 * (e.g., an RST is received).
 *
 * Before TFO, a child socket was created only after the 3WHS completed,
 * hence it never needed to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK packets,
 * until the 3WHS is either completed or aborted. Afterwards the req will
 * stay until either the child socket is accepted or the listener is closed
 * (the rare case where the child is never accepted).
 *
 * In short, a request socket is only freed after BOTH the 3WHS has
 * completed (or aborted) and the child socket has been accepted (or the
 * listener closed). When a child socket is accepted, its corresponding
 * req->sk is set to NULL since it's no longer needed. More importantly,
 * "req->sk == NULL" will be used by the code below to determine if a child
 * socket has been accepted or not, and the check is protected by the
 * fastopenq->lock described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect the access, a simple spin
 * lock per listener, "icsk->icsk_accept_queue.fastopenq->lock", is created.
 * Only in the rare case when both the listener and the child locks are
 * held, e.g., in inet_csk_listen_stop(), do we not need to acquire the
 * lock. The lock also protects other fields such as fastopenq->qlen, which
 * is decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution was to simply use the listener's existing
 * socket lock. But first, the socket lock is difficult to use. It is not
 * a simple spin lock - one must consider sock_owned_by_user() and arrange
 * to use the sk_add_backlog() machinery. But what really makes it
 * infeasible is the locking hierarchy violation. E.g.,
 * inet_csk_listen_stop() may try to acquire a child's lock while holding
 * the listener's socket lock. A corner case might also exist in
 * tcp_v4_hnd_req() that would trigger this locking order.
 *
 * This function also sets "treq->tfo_listener" to false.
 * treq->tfo_listener is used by the listener, so it is protected by the
 * fastopenq->lock in this function.
 */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset)
{
	struct sock *lsk = req->rsk_listener;
	struct fastopen_queue *fastopenq;

	fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;

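	/* Detach the request from the child socket and take it out of the
	 * Fast Open accounting under the per-listener fastopenq->lock.
	 */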
	tcp_sk(sk)->fastopen_rsk = NULL;
	spin_lock_bh(&fastopenq->lock);
	fastopenq->qlen--;
	tcp_rsk(req)->tfo_listener = false;
	if (req->sk)	/* the child socket hasn't been accepted yet */
		goto out;

	if (!reset || lsk->sk_state != TCP_LISTEN) {
		/* If the listener has been closed, don't bother with the
		 * special RST handling below.
		 */
		spin_unlock_bh(&fastopenq->lock);
		reqsk_put(req);
		return;
	}
	/* Wait for 60 seconds before removing a req that has triggered RST.
	 * This is a simple defense against TFO spoofing attacks - the req
	 * is counted against fastopen.max_qlen, and TFO is disabled when
	 * the qlen exceeds max_qlen.
	 *
	 * For more details see the CoNEXT'11 "TCP Fast Open" paper.
	 */
	req->rsk_timer.expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
		fastopenq->rskq_rst_tail->dl_next = req;

	req->dl_next = NULL;
	fastopenq->rskq_rst_tail = req;
	fastopenq->qlen++;
out:
	spin_unlock_bh(&fastopenq->lock);
}