/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

struct request_sock_ops {
	int		family;
	int		obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};
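
/* Illustrative sketch only: a protocol provides one request_sock_ops
 * table for all of its request socks; ->slab and ->slab_name are
 * normally filled in at proto_register() time.  The foo_* names below
 * are hypothetical -- see tcp_request_sock_ops in net/ipv4/tcp_ipv4.c
 * for a real instance:
 *
 *	static struct request_sock_ops foo_request_sock_ops = {
 *		.family		 = PF_INET,
 *		.obj_size	 = sizeof(struct foo_request_sock),
 *		.rtx_syn_ack	 = foo_rtx_synack,
 *		.send_ack	 = foo_reqsk_send_ack,
 *		.send_reset	 = foo_send_reset,
 *		.destructor	 = foo_reqsk_destructor,
 *		.syn_ack_timeout = foo_syn_ack_timeout,
 *	};
 */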

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

/* struct request_sock - mini sock to represent a connection request
 */
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	u32				*saved_syn;
	u32				secid;
	u32				peer_secid;
};

static inline struct request_sock *inet_reqsk(struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}
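
/* The casts in inet_reqsk()/req_to_sk() are legal because struct
 * request_sock, like struct sock, starts with a struct sock_common;
 * the rsk_* macros above rely on the same layout overlap.
 */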

static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);

	if (req) {
		req->rsk_ops = ops;
		if (attach_listener) {
			sock_hold(sk_listener);
			req->rsk_listener = sk_listener;
		} else {
			req->rsk_listener = NULL;
		}
		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
		sk_node_init(&req_to_sk(req)->sk_node);
		sk_tx_queue_clear(req_to_sk(req));
		req->saved_syn = NULL;
		/* Following is temporary. It is coupled with debugging
		 * helpers in reqsk_put() & reqsk_free()
		 */
		atomic_set(&req->rsk_refcnt, 0);
	}
	return req;
}
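
/* Typical call site (sketch): a listener allocates a request sock on
 * SYN receipt and skips the listener reference when it will answer
 * with a syncookie instead of keeping state.  TCP actually goes
 * through inet_reqsk_alloc()/tcp_conn_request():
 *
 *	req = reqsk_alloc(&tcp_request_sock_ops, sk, !want_cookie);
 *	if (!req)
 *		goto drop;
 */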

static inline void reqsk_free(struct request_sock *req)
{
	/* temporary debugging */
	WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);

	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (atomic_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}
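
/* Lifetime rule implied by the helpers above: reqsk_free() may only be
 * called while the refcount is still zero (an allocation discarded
 * before being published); once a request sock is visible to other
 * CPUs, owners must go through reqsk_put(), and the WARN_ON_ONCE() in
 * reqsk_free() catches imbalances.
 */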
126
Arnaldo Carvalho de Melo0e875062005-06-18 22:47:59 -0700127extern int sysctl_max_syn_backlog;
128
/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 * XXX (TFO) - ideally these fields would live in the listen_sock
 * structure.  The implementation difficulty is that listen_sock is
 * part of request_sock_queue and hence is freed when a listener is
 * stopped, while the TFO fields may continue to be accessed even after
 * a listener is closed, until its sk_refcnt drops to 0 implying no
 * more outstanding TFO reqs.  One solution is to keep listen_opt
 * around until sk_refcnt drops to 0, but there is further complexity
 * to resolve, e.g. a listener can be disabled temporarily through
 * shutdown()->tcp_disconnect() and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */
};

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};
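
/* Note that qlen/young are updated atomically and without taking
 * rskq_lock (see reqsk_queue_added()/reqsk_queue_removed() below);
 * rskq_lock only serializes the accept FIFO itself.
 */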

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return queue->rskq_accept_head == NULL;
}

static inline void reqsk_queue_add(struct request_sock_queue *queue,
				   struct request_sock *req,
				   struct sock *parent,
				   struct sock *child)
{
	spin_lock(&queue->rskq_lock);
	req->sk = child;
	sk_acceptq_added(parent);

	if (queue->rskq_accept_head == NULL)
		queue->rskq_accept_head = req;
	else
		queue->rskq_accept_tail->dl_next = req;

	queue->rskq_accept_tail = req;
	req->dl_next = NULL;
	spin_unlock(&queue->rskq_lock);
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		queue->rskq_accept_head = req->dl_next;
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}
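
/* Sketch of the accept() side (cf. inet_csk_accept()): the child sock
 * rides in req->sk, and the request is released once it is detached:
 *
 *	req = reqsk_queue_remove(queue, sk);
 *	newsk = req->sk;
 *	...
 *	reqsk_put(req);
 */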

static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)	/* req was still "young": no timeout yet */
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

#endif /* _REQUEST_SOCK_H */