#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

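/* Install a random TCP Fast Open key for this netns if none has been
 * published yet. The unlocked existence check races with concurrent
 * callers; the comment below explains why that is acceptable.
 */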
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, key, sizeof(key));
}

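/* RCU callback: free a retired Fast Open context (and its cipher
 * handle) once all pre-existing RCU readers are done with it.
 */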
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);
	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

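/* Unpublish the per-netns Fast Open context: swap the pointer to NULL
 * under the lock, then defer the actual free to RCU.
 */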
void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

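/* Install a new Fast Open key: allocate an AES cipher, key it, and
 * publish the new context with RCU, retiring any previous one.
 * Readers observe either the old or the new context, never a
 * partially initialized one.
 */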
int tcp_fastopen_reset_cipher(struct net *net, void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}

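/* Encrypt one 16-byte block describing the path (the addresses) into
 * the cookie value under the current netns key. Returns false if no
 * key context has been installed yet.
 */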
static bool __tcp_fastopen_cookie_gen(struct net *net, const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();
	ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}

/* Generate the fastopen cookie by applying AES-128 to the source and
 * destination addresses. Pad with 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
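/* Concretely, the IPv6 path below computes (sketch, with E_K being the
 * keyed AES-128 block cipher):
 *
 *   cookie = E_K(E_K(saddr) XOR daddr)
 *
 * i.e. CBC-MAC with a zero IV over the two 16-byte address blocks.
 */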
static bool tcp_fastopen_cookie_gen(struct net *net,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(net, path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(net, &ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(net, buf, foc);
		}
	}
#endif
	return false;
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) is not needed here,
	 * as we certainly are not changing the upper 32-bit value (0).
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

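/* Build a full (non-request) child socket straight from the TFO SYN:
 * the three-way handshake has not completed yet, so the child starts
 * in SYN_RECV with the SYN's payload already queued for the reader.
 */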
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account that the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_call_bpf(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
	tcp_init_buffer_space(child);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into the listener's accept queue.
	 */
	return child;
}

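/* Admission check run before any cookie work: is TFO enabled on this
 * listener at all, and is there room in its pending-TFO queue?
 */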
static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

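/* For reference, the listener-side setup that exercises this path is
 * roughly (illustrative userspace, not part of this file):
 *
 *   int qlen = 16;
 *   setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *   listen(fd, backlog);
 *
 * with TFO_SERVER_ENABLE (0x2) set in the net.ipv4.tcp_fastopen sysctl.
 */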
/* Returns the child socket if we should perform Fast Open on the SYN,
 * or NULL otherwise. The cookie (foc) may be updated and returned to
 * the client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data && (tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(sock_net(sk), req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

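/* Decide, on the client, whether a cached cookie (or cookie-less TFO)
 * may be used for this connection: back off after repeated SYN losses,
 * honor the blackhole-detection window, and allow cookie-less TFO when
 * TFO_CLIENT_NO_COOKIE is set.
 */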
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	unsigned long last_syn_loss = 0;
	int syn_loss = 0;

	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

	/* Recurring FO SYN losses: no cookie or data in SYN */
	if (syn_loss > 1 &&
	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
		cookie->len = -1;
		return false;
	}

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	if (sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return true if we want to defer until the application writes data;
 * return false if we want to send out the SYN immediately.
 */
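/* A typical caller sequence on the client side looks roughly like this
 * (illustrative userspace, not part of this file):
 *
 *   int one = 1;
 *   setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *   connect(fd, &addr, addrlen);   // may complete with the SYN deferred
 *   send(fd, buf, len, 0);         // data can ride on the deferred SYN
 *
 * The MSG_FASTOPEN flag with sendto() is the older alternative that
 * skips the explicit connect().
 */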
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to
 * be blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO: record the current jiffies and bump
 * tfo_active_disable_times.
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	atomic_inc(&net->ipv4.tfo_active_disable_times);
	net->ipv4.tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate the timeout for TFO active disable.
 * Return true if we are still in the active TFO disable period.
 * Return false if the timeout has already expired and we should use
 * active TFO again.
 */
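/* The backoff computed below doubles per disable event and is capped
 * at 2^6, so with e.g. a 3600 s blackhole timeout the disable windows
 * run 1h, 2h, 4h, ... up to 64h (illustrative arithmetic):
 *
 *   multiplier = 1 << min(tfo_da_times - 1, 6);
 *   timeout    = multiplier * tfo_bh_timeout * HZ;
 */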
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct rb_node *p;
	struct sk_buff *skb;
	struct dst_entry *dst;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		p = rb_first(&tp->out_of_order_queue);
		if (p && !rb_next(p)) {
			skb = rb_entry(p, struct sk_buff, rbnode);
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}