/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Offer an initial receive window of 10 mss. */
#define TCP_DEFAULT_INIT_RCVWND	10

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND		10

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

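/*
 * Illustrative example (not part of the kernel sources): the signed cast
 * in before() keeps these helpers correct across sequence-number
 * wraparound, e.g.
 *
 *	before(0xfffffff0, 0x00000010) is true, because
 *	(__s32)(0xfffffff0 - 0x00000010) == (__s32)0xffffffe0 < 0,
 *
 * even though 0xfffffff0 > 0x10 as plain unsigned values.  The same
 * unsigned-subtraction trick lets between() handle a window [seq2, seq3]
 * that straddles the 2^32 boundary.
 */
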
static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

extern bool tcp_check_oom(struct sock *sk, int shift);

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_init_mem(struct net *net);

extern void tcp_tasklet_init(void);

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern void tcp_v4_early_demux(struct sk_buff *skb);
extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern void tcp_release_cb(struct sock *sk);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern bool tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_init_metrics(struct sock *sk);
extern void tcp_metrics_init(void);
extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
extern bool tcp_remember_stamp(struct sock *sk);
extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
extern void tcp_disable_fack(struct tcp_sock *tp);
extern void tcp_close(struct sock *sk, long timeout);
extern void tcp_init_sock(struct sock *sk);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(const struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, const u8 **hvpp,
			      int estab);
extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);

void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);

/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern bool tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern bool tcp_syn_flood_action(struct sock *sk,
				 const struct sk_buff *skb,
				 const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);
extern void tcp_resume_early_retransmit(struct sock *sk);
extern void tcp_rearm_rto(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

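/*
 * Worked example for tcp_bound_to_half_wnd() (illustrative numbers):
 * with tp->max_window == 65535, cutoff is 32767 and a 64000 byte TSO
 * packet is bounded to roughly half the window; with a tiny
 * tp->max_window == 400 (< 512), the bound is the whole window (400)
 * rather than half of it, so we avoid packetizing into needlessly
 * small sub-MSS pieces.
 */
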
/* tcp.c */
extern void tcp_get_info(const struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
				unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

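/*
 * Note on units (an informal reading of the estimator in tcp_input.c):
 * tp->srtt is the smoothed RTT scaled by 8, so (tp->srtt >> 3) recovers
 * SRTT in jiffies, while tp->rttvar is maintained pre-scaled such that
 * the sum approximates the RFC 6298 "SRTT + 4*RTTVAR" retransmission
 * timeout.
 */
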
extern void tcp_set_rto(struct sock *sk);

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

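/*
 * Illustrative note: pred_flags is the VJ header-prediction template.
 * The expression above packs the data offset (tcp_header_len is in
 * bytes, so the << 26 shift lands tcp_header_len/4 in the doff nibble),
 * the ACK flag and the expected window into the one 32-bit word that
 * covers bytes 12..15 of the TCP header; a segment whose corresponding
 * word matches pred_flags may take the fast path in
 * tcp_rcv_established().
 */
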
static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

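/*
 * Example (illustrative): byte 13 of the TCP header carries the flag
 * bits, so for a plain SYN-ACK segment
 *	tcp_flag_byte(th) == (TCPHDR_SYN | TCPHDR_ACK)
 * holds, and a RST can be tested with (tcp_flag_byte(th) & TCPHDR_RST).
 */
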
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	/* 1 byte hole */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 *
 * If we receive a SYN packet with these bits set, it means a network is
 * playing bad games with TOS bits. In order to avoid possible false congestion
 * notifications, we disable TCP ECN negotiation.
 */
static inline void
TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	if (sysctl_tcp_ecn && th->ece && th->cwr &&
	    INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
		inet_rsk(req)->ecn_ok = 1;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

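/*
 * Relationship between the two helpers (illustrative): a TSO skb
 * carrying 4000 bytes of payload with gso_size == 1000 has
 * tcp_skb_pcount() == 4 and tcp_skb_mss() == 1000, i.e. it is accounted
 * as four 1000-byte packets by congestion control and SACK bookkeeping.
 */
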
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = sysctl_tcp_early_retrans &&
		!sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
	tp->early_retrans_delayed = 0;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

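/*
 * Worked example (illustrative): with packets_out == 10, sacked_out == 2,
 * lost_out == 1 and retrans_out == 1, tcp_left_out() is 3 and
 * tcp_packets_in_flight() == 10 - 3 + 1 == 8 segments still presumed to
 * be in the network.
 */
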
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

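/*
 * Example (illustrative): outside CWR/Recovery, with snd_ssthresh == 20
 * and snd_cwnd == 40, this returns max(20, 20 + 10) == 30, i.e. 3/4 of
 * cwnd ((cwnd >> 1) + (cwnd >> 2)), half-way from ssthresh to cwnd.
 */
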
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}

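/*
 * Worked example (illustrative): with sysctl_tcp_adv_win_scale == 2 and
 * space == 65536 bytes, tcp_win_from_space() returns
 * 65536 - (65536 >> 2) == 49152, i.e. 3/4 of the buffer is offered as
 * receive window and 1/4 is kept back for overhead; a non-positive
 * scale would instead offer space >> (-scale).
 */
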
1056/* Note: caller must be prepared to deal with negative returns */
1057static inline int tcp_space(const struct sock *sk)
1058{
1059 return tcp_win_from_space(sk->sk_rcvbuf -
1060 atomic_read(&sk->sk_rmem_alloc));
1061}
1062
1063static inline int tcp_full_space(const struct sock *sk)
1064{
1065 return tcp_win_from_space(sk->sk_rcvbuf);
1066}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}
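
/* Hypothetical illustration (not a kernel helper): deciding whether an
 * idle connection is due for a keepalive probe. This mirrors, under that
 * assumption, the expiry check done by tcp_keepalive_timer() in
 * tcp_timer.c.
 */
static inline bool example_keepalive_due(const struct tcp_sock *tp)
{
	/* Per-socket values set via TCP_KEEPIDLE take precedence over the
	 * sysctl defaults; the helpers above hide that fallback. Both
	 * sides of the comparison are in jiffies.
	 */
	return keepalive_time_elapsed(tp) >= keepalive_time_when(tp);
}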

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
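
/* The floor above is (rto << 2) - (rto >> 1), i.e. the FIN-WAIT-2 timeout
 * is never less than 3.5 times the retransmission timeout. A hypothetical
 * sketch of how a close path might consume it, loosely modelled on
 * tcp_close(); treat the exact control flow here as this sketch's
 * assumption, not a quote of that function.
 */
static inline void example_fin_wait2(struct sock *sk)
{
	const int tmo = tcp_fin_time(sk);

	if (tmo > TCP_TIMEWAIT_LEN)
		/* Too long for a timewait socket: keep the full socket and
		 * arm a timer for the excess.
		 */
		inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
	else
		/* Short enough: let the timewait machinery hold the rest
		 * of FIN-WAIT-2.
		 */
		tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
}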

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while subsequent segments carry valid values. Ignore a zero
	 * value, or else a 'negative' tsval might make us reject their
	 * packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamps, and, if
	   they do, it is recommended to ignore PAWS because "their cleanup
	   function should take precedence over timestamps." That is
	   certainly a mistake: to relax the constraint we must understand
	   why it exists. If a peer reboots, its clock may go out of sync
	   and half-open connections will then never be reset. The problem
	   would not exist if every implementation followed the draft on
	   maintaining clocks across reboots; Linux 2.2 does not.

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}
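
/* Hypothetical illustration of the signed-wraparound trick used by
 * tcp_paws_check(): the (s32) cast keeps the comparison correct when the
 * 32-bit timestamp clock wraps. The constants are invented for the
 * example.
 */
static inline bool example_paws_wrap(void)
{
	u32 ts_recent = 0xfffffff0;	/* stored just before wraparound */
	u32 rcv_tsval = 0x00000010;	/* received just after wraparound */

	/* 0xfffffff0 - 0x00000010 == 0xffffffe0, which is -32 as an s32:
	 * the incoming timestamp is correctly treated as newer, so the
	 * PAWS test passes despite the wrap.
	 */
	return (s32)(ts_recent - rcv_tsval) <= 0;
}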

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

union tcp_md5_addr {
	struct in_addr  a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	union tcp_md5_addr	addr;
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb);
extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
			  int family, const u8 *newkey,
			  u8 newkeylen, gfp_t gfp);
extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
			  int family);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
			const union tcp_md5_addr *addr, int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool	*tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);

extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
				 unsigned int header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    const struct tcp_md5sig_key *key);
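
/* Hypothetical illustration of the pool protocol above: grab a per-cpu
 * scratch pool, feed it the pieces to be signed, and release it (the get
 * disables preemption) before returning. Simplified for the sketch: a
 * real signature also covers the pseudo-header, as tcp_v4_md5_hash_skb()
 * does; example_md5_hash() is not a kernel function.
 */
static inline int example_md5_hash(u8 *md5_hash, struct tcp_md5sig_key *key,
				   const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	int err = -1;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		return -1;	/* pool was never allocated */
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto out;
	if (tcp_md5_hash_header(hp, tcp_hdr(skb)))
		goto out;
	if (tcp_md5_hash_skb_data(hp, skb, tcp_hdrlen(skb)))
		goto out;
	if (tcp_md5_hash_key(hp, key))
		goto out;
	err = crypto_hash_final(desc, md5_hash);
out:
	tcp_put_md5sig_pool();
	return err;
}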

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}
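
/* Hypothetical illustration of the walkers above: count how many queued
 * skbs have not yet been transmitted. tcp_send_head() marks the boundary
 * between data sent at least once (before it) and never-sent data (from
 * it onward); example_count_unsent() is not a kernel helper.
 */
static inline int example_count_unsent(struct sock *sk)
{
	struct sk_buff *skb = tcp_send_head(sk);
	int unsent = 0;

	if (!skb)
		return 0;	/* everything queued has been sent once */

	tcp_for_write_queue_from(skb, sk)
		unsent++;
	return unsent;
}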

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline bool tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed bit,
 * valid only if sacked_out > 0 or when the caller has otherwise ensured
 * validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
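
/* Hypothetical illustration: how a retransmission path might consult
 * tcp_stream_is_thin(). Loosely modelled on the thin-linear-timeouts
 * logic in tcp_timer.c; the surrounding control flow is this sketch's
 * assumption.
 */
static inline bool example_use_linear_timeouts(const struct sock *sk)
{
	/* With fewer than 4 packets in flight, exponential backoff adds
	 * disproportionate latency, so a thin stream may retransmit on a
	 * fixed interval instead.
	 */
	return sysctl_tcp_thin_linear_timeouts &&
	       tcp_stream_is_thin(tcp_sk(sk)) &&
	       sk->sk_state == TCP_ESTABLISHED;
}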

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
	char				*name;
	sa_family_t			family;
	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num, uid;
	loff_t			last_pos;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
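
/* Hypothetical illustration of the registration hooks above: the shape of
 * a per-family table. The example_* names are invented for this sketch
 * (the real IPv4 instance is tcp4_seq_afinfo in tcp_ipv4.c), and the
 * block is compiled out because example_seq_show() is not defined here.
 */
#if 0
static const struct file_operations example_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static struct tcp_seq_afinfo example_seq_afinfo = {
	.name		= "tcp_example",
	.family		= AF_INET,
	.seq_fops	= &example_afinfo_seq_fops,
	.seq_ops	= {
		.show	= example_seq_show,	/* supplied elsewhere */
	},
	/* tcp_proc_register() fills in .start/.next/.stop and creates the
	 * /proc/net/tcp_example entry for each namespace.
	 */
};
#endif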

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
				       netdev_features_t features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  const struct sock *sk,
						  const struct request_sock *req,
						  const struct sk_buff *skb);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  const struct sock *sk,
						  const struct request_sock *req,
						  const struct sk_buff *skb);
#endif
};

/* Using SHA1 for now, define some constants.
 */
#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)

extern int tcp_cookie_generator(u32 *bakery);

/**
 * struct tcp_cookie_values - each socket needs extra space for the
 * cookies, together with (optional) space for any SYN data.
 *
 * A tcp_sock contains a pointer to the current value, and this is
 * cloned to the tcp_timewait_sock.
 *
 * @cookie_pair:	variable data from the option exchange.
 *
 * @cookie_desired:	user specified tcpct_cookie_desired. Zero
 *			indicates default (sysctl_tcp_cookie_size).
 *			After cookie sent, remembers size of cookie.
 *			Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
 *
 * @s_data_desired:	user specified tcpct_s_data_desired. When the
 *			constant payload is specified (@s_data_constant),
 *			holds its length instead.
 *			Range 0 to TCP_MSS_DESIRED.
 *
 * @s_data_payload:	constant data that is to be included in the
 *			payload of SYN or SYNACK segments when the
 *			cookie option is present.
 */
struct tcp_cookie_values {
	struct kref	kref;
	u8		cookie_pair[TCP_COOKIE_PAIR_SIZE];
	u8		cookie_pair_size;
	u8		cookie_desired;
	u16		s_data_desired:11,
			s_data_constant:1,
			s_data_in:1,
			s_data_out:1,
			s_data_unused:2;
	u8		s_data_payload[0];
};

static inline void tcp_cookie_values_release(struct kref *kref)
{
	kfree(container_of(kref, struct tcp_cookie_values, kref));
}
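
/* Hypothetical illustration of the refcounting contract above: holders
 * take a reference with kref_get() and drop it with kref_put(), which
 * invokes tcp_cookie_values_release() on the final put.
 * example_put_cvp() is not a kernel helper.
 */
static inline void example_put_cvp(struct tcp_cookie_values *cvp)
{
	if (cvp)
		kref_put(&cvp->kref, tcp_cookie_values_release);
}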

/* The length of constant payload data. Note that s_data_desired is
 * overloaded, depending on s_data_constant: either the length of constant
 * data (returned here) or the limit on variable data.
 */
static inline int tcp_s_data_size(const struct tcp_sock *tp)
{
	return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
		? tp->cookie_values->s_data_desired
		: 0;
}

/**
 * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
 *
 * As tcp_request_sock has already been extended in other places, the
 * only remaining method is to pass stack values along as function
 * parameters. These parameters are not needed after sending SYNACK.
 *
 * @cookie_bakery:	cryptographic secret and message workspace.
 *
 * @cookie_plus:	bytes in authenticator/cookie option, copied from
 *			struct tcp_options_received (above).
 */
struct tcp_extend_values {
	struct request_values		rv;
	u32				cookie_bakery[COOKIE_WORKSPACE_WORDS];
	u8				cookie_plus:6,
					cookie_out_never:1,
					cookie_in_always:1;
};

static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
{
	return (struct tcp_extend_values *)rvp;
}

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */