/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the TCP module.
 *
 * Version:     @(#)tcp.h       1.0.5   05/23/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER          (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE    40
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW          32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS             88U
/* The base MSS to use for MTU probing */
#define TCP_BASE_MSS            1024

/* Probing interval, defaults to 10 minutes as per RFC 4821 */
#define TCP_PROBE_INTERVAL      600

/* Search-range threshold (in bytes) below which MTU probing stops */
#define TCP_PROBE_THRESHOLD     8
/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS       16U

/* urg_data states */
#define TCP_URG_VALID   0x0100
#define TCP_URG_NOTYET  0x0200
#define TCP_URG_READ    0x0400

#define TCP_RETR1       3       /*
                                 * This is how many retries it does before it
                                 * tries to figure out if the gateway is
                                 * down. Minimal RFC value is 3; it corresponds
                                 * to ~3sec-8min depending on RTO.
                                 */

#define TCP_RETR2       15      /*
                                 * This should take at least
                                 * 90 minutes to time out.
                                 * RFC1122 says that the limit is 100 sec.
                                 * 15 is ~13-30min depending on RTO.
                                 */
#define TCP_SYN_RETRIES  6      /* This is how many retries are done
                                 * when active opening a connection.
                                 * RFC1122 says the minimum retry MUST
                                 * be at least 180secs. Nevertheless
                                 * this value corresponds to ~63secs
                                 * of retransmission with the
                                 * current initial RTO.
                                 */

#define TCP_SYNACK_RETRIES 5    /* This is how many retries are done
                                 * when passive opening a connection.
                                 * This corresponds to ~31secs of
                                 * retransmission with the current
                                 * initial RTO.
                                 */
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
                                  * state, about 60 seconds      */
#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
                                  * It used to be 3min, new value is 60sec,
                                  * to combine FIN-WAIT-2 timeout with
                                  * TIME-WAIT timer.
                                  */

#define TCP_DELACK_MAX  ((unsigned)(HZ/5))      /* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN  ((unsigned)(HZ/25))     /* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN     ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN  4U
#define TCP_ATO_MIN     4U
#endif
#define TCP_RTO_MAX     ((unsigned)(120*HZ))
#define TCP_RTO_MIN     ((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))     /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
                                                 * used as a fallback RTO for the
                                                 * initial data transmission if no
                                                 * valid RTT sample has been acquired,
                                                 * most likely due to retrans in 3WHS.
                                                 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
                                                         * for local resources.
                                                         */

#define TCP_KEEPALIVE_TIME      (120*60*HZ)     /* two hours */
#define TCP_KEEPALIVE_PROBES    9               /* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL     (75*HZ)

#define MAX_TCP_KEEPIDLE        32767
#define MAX_TCP_KEEPINTVL       32767
#define MAX_TCP_KEEPCNT         127
#define MAX_TCP_SYNCNT          127

#define TCP_SYNQ_INTERVAL       (HZ/5)  /* Period of SYNACK timer */
#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL    60              /* Per-host timestamps are invalidated
                                         * after this time. It should be equal to
                                         * (or greater than) TCP_TIMEWAIT_LEN
                                         * to provide reliability equal to that
                                         * provided by the TIME-WAIT state.
                                         */
#define TCP_PAWS_WINDOW 1               /* Replay window for per-host
                                         * timestamps. It must be less than
                                         * the minimal timewait lifetime.
                                         */
/*
 *      TCP option
 */

#define TCPOPT_NOP              1       /* Padding */
#define TCPOPT_EOL              0       /* End of options */
#define TCPOPT_MSS              2       /* Segment size negotiating */
#define TCPOPT_WINDOW           3       /* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP        8       /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG           19      /* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN         34      /* Fast open (RFC7413) */
#define TCPOPT_EXP              254     /* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC   0xF989

/*
 *      TCP option lengths
 */

#define TCPOLEN_MSS             4
#define TCPOLEN_WINDOW          3
#define TCPOLEN_SACK_PERM       2
#define TCPOLEN_TIMESTAMP       10
#define TCPOLEN_MD5SIG          18
#define TCPOLEN_FASTOPEN_BASE   2
#define TCPOLEN_EXP_FASTOPEN_BASE  4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED          12
#define TCPOLEN_WSCALE_ALIGNED          4
#define TCPOLEN_SACKPERM_ALIGNED        4
#define TCPOLEN_SACK_BASE               2
#define TCPOLEN_SACK_BASE_ALIGNED       4
#define TCPOLEN_SACK_PERBLOCK           8
#define TCPOLEN_MD5SIG_ALIGNED          20
#define TCPOLEN_MSS_ALIGNED             4

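/* Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): how the aligned lengths above budget against
 * MAX_TCP_OPTION_SPACE. With timestamps in use, 40 - 12 (TS) - 4 (SACK base)
 * leaves 24 bytes, i.e. room for 3 SACK blocks of 8 bytes each; 4 blocks
 * fit when timestamps are off.
 */
static inline int tcp_max_sack_blocks_demo(bool timestamps)
{
        int space = MAX_TCP_OPTION_SPACE - TCPOLEN_SACK_BASE_ALIGNED;

        if (timestamps)
                space -= TCPOLEN_TSTAMP_ALIGNED;
        return space / TCPOLEN_SACK_PERBLOCK;   /* 3 with timestamps, 4 without */
}
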
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF           1       /* Nagle's algo is disabled */
#define TCP_NAGLE_CORK          2       /* Socket is corked         */
#define TCP_NAGLE_PUSH          4       /* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per RFC 6928 */
#define TCP_INIT_CWND           10

/* Bit Flags for sysctl_tcp_fastopen */
#define TFO_CLIENT_ENABLE       1
#define TFO_SERVER_ENABLE       2
#define TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define TFO_SERVER_COOKIE_NOT_REQD      0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
 */
#define TFO_SERVER_WO_SOCKOPT1  0x400
#define TFO_SERVER_WO_SOCKOPT2  0x800

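/* Illustrative sketch (hypothetical helper, not in the original header):
 * the TFO bits above combine into a single sysctl_tcp_fastopen value,
 * e.g. 0x3 enables Fast Open for both outgoing and incoming connections.
 * This only demonstrates how such flags are tested.
 */
static inline bool tfo_enabled_demo(int fastopen_sysctl, bool server)
{
        return fastopen_sysctl & (server ? TFO_SERVER_ENABLE : TFO_CLIENT_ENABLE);
}
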
extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
extern int sysctl_tcp_pacing_ca_ratio;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;

        return tcp_memory_pressure;
}
/*
 * The next routines deal with comparing 32-bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)       before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}

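/* Worked example (illustrative, hypothetical helper): the signed-difference
 * trick in before() survives 32-bit wraparound. With seq1 = 0xfffffff0 and
 * seq2 = 0x00000010, (__s32)(seq1 - seq2) == -32, so before() correctly
 * reports seq1 as older even though seq1 > seq2 as unsigned values.
 */
static inline bool tcp_seq_wrap_demo(void)
{
        return before(0xfffffff0U, 0x00000010U);        /* true across the wrap */
}
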
static inline bool tcp_out_of_memory(struct sock *sk)
{
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
            sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
        int orphans = percpu_counter_read_positive(ocp);

        if (orphans << shift > sysctl_tcp_max_orphans) {
                orphans = percpu_counter_sum_positive(ocp);
                if (orphans << shift > sysctl_tcp_max_orphans)
                        return true;
        }
        return false;
}

bool tcp_check_oom(struct sock *sk, int shift);

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)    SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)       SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)  SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
                 int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ack.quick) {
                if (pkts >= icsk->icsk_ack.quick) {
                        icsk->icsk_ack.quick = 0;
                        /* Leaving quickack mode we deflate ATO. */
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                } else
                        icsk->icsk_ack.quick -= pkts;
        }
}

#define TCP_ECN_OK              1
#define TCP_ECN_QUEUE_CWR       2
#define TCP_ECN_DEMAND_CWR      4
#define TCP_ECN_SEEN            8

enum tcp_tw_status {
        TCP_TW_SUCCESS = 0,
        TCP_TW_RST = 1,
        TCP_TW_ACK = 2,
        TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                              struct sk_buff *skb,
                                              const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
                        bool paws_check, bool timestamps);
bool tcp_remember_stamp(struct sock *sk);
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
                      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx,
                       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *      TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst,
                                  struct request_sock *req_unhash,
                                  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
                                bool attach_req);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
                                 struct request_sock *req,
                                 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE       2
#define TCP_SYNCOOKIE_PERIOD    (60 * HZ)
#define TCP_SYNCOOKIE_VALID     (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        unsigned long now = jiffies;

        if (time_after(now, last_overflow + HZ))
                tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

        return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
        u64 val = get_jiffies_64();

        do_div(val, TCP_SYNCOOKIE_PERIOD);
        return val;
}

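/* Illustrative sketch (hypothetical helper, not in the original header):
 * how the counter above bounds cookie lifetime. A cookie stamped with
 * counter value C remains acceptable while tcp_cookie_time() - C is less
 * than MAX_SYNCOOKIE_AGE, i.e. for at most about 2 * 60 seconds.
 */
static inline bool tcp_cookie_age_ok_demo(u32 sent_count)
{
        return (tcp_cookie_time() - sent_count) < MAX_SYNCOOKIE_AGE;
}
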
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
                   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
                              const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                               int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
int tcp_retransmit_skb(struct sock *, struct sk_buff *);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);

/* tcp_input.c */
void tcp_resume_early_retransmit(struct sock *sk);
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
        inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
        int cutoff;

        /* When peer uses tiny windows, there is no use in packetizing
         * to sub-MSS pieces for the sake of SWS or making sure there
         * are enough packets in the pipe for fast recovery.
         *
         * On the other hand, for extremely large MSS devices, handling
         * smaller than MSS windows in this way does make sense.
         */
        if (tp->max_window >= 512)
                cutoff = (tp->max_window >> 1);
        else
                cutoff = tp->max_window;

        if (cutoff && pktsize > cutoff)
                return max_t(int, cutoff, 68U - tp->tcp_header_len);
        else
                return pktsize;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597
Arnaldo Carvalho de Melo17b085e2005-08-12 12:59:17 -0300598/* tcp.c */
Eric Dumazet0df48c22015-04-28 15:28:17 -0700599void tcp_get_info(struct sock *, struct tcp_info *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600
601/* Read 'sendfile()'-style from a TCP socket */
602typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
603 unsigned int, size_t);
Joe Perches5c9f3022013-09-23 11:33:32 -0700604int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
605 sk_read_actor_t recv_actor);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606
Joe Perches5c9f3022013-09-23 11:33:32 -0700607void tcp_initialize_rcv_mss(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608
Joe Perches5c9f3022013-09-23 11:33:32 -0700609int tcp_mtu_to_mss(struct sock *sk, int pmtu);
610int tcp_mss_to_mtu(struct sock *sk, int mss);
611void tcp_mtup_init(struct sock *sk);
612void tcp_init_buffer_space(struct sock *sk);
John Heffner5d424d52006-03-20 17:53:41 -0800613
Damian Lukowskif1ecd5d2009-08-26 00:16:31 +0000614static inline void tcp_bound_rto(const struct sock *sk)
615{
616 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
617 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
618}
619
620static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
621{
Eric Dumazet740b0f12014-02-26 14:02:48 -0800622 return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
Damian Lukowskif1ecd5d2009-08-26 00:16:31 +0000623}
624
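/* Worked example (illustrative, hypothetical helper): tp->srtt_us stores
 * 8 * SRTT in microseconds, so __tcp_set_rto() computes SRTT + RTTVAR.
 * With SRTT = 100 ms and RTTVAR = 50 ms the result is ~150 ms in jiffies,
 * later clamped to [TCP_RTO_MIN, TCP_RTO_MAX] by tcp_bound_rto().
 */
static inline u32 tcp_rto_demo(void)
{
        u32 srtt_us = 8 * 100000;       /* SRTT = 100 ms, stored scaled by 8 */
        u32 rttvar_us = 50000;          /* RTTVAR = 50 ms */

        return usecs_to_jiffies((srtt_us >> 3) + rttvar_us);    /* ~150 ms */
}
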
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                               ntohl(TCP_FLAG_ACK) |
                               snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (skb_queue_empty(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
                tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
        const struct dst_entry *dst = __sk_dst_get(sk);
        u32 rto_min = TCP_RTO_MIN;

        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
        return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
        return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
        return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
        return tp->rtt_min[0].rtt;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

        if (win < 0)
                win = 0;
        return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32 bits; this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp          ((__u32)(jiffies))

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
        return skb->skb_mstamp.stamp_jiffies;
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN  (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
        __u32           seq;            /* Starting sequence number     */
        __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
        union {
                /* Note : tcp_tw_isn is used in input path only
                 *        (isn chosen by tcp_timewait_state_process())
                 *
                 *        tcp_gso_segs/size are used in write queue only,
                 *        cf tcp_skb_pcount()/tcp_skb_mss()
                 */
                __u32           tcp_tw_isn;
                struct {
                        u16     tcp_gso_segs;
                        u16     tcp_gso_size;
                };
        };
        __u8            tcp_flags;      /* TCP header flags. (tcp[13]) */

        __u8            sacked;         /* State flags for SACK/FACK.   */
#define TCPCB_SACKED_ACKED      0x01    /* SKB ACK'd by a SACK block    */
#define TCPCB_SACKED_RETRANS    0x02    /* SKB retransmitted            */
#define TCPCB_LOST              0x04    /* SKB is lost                  */
#define TCPCB_TAGBITS           0x07    /* All tag bits                 */
#define TCPCB_REPAIRED          0x10    /* SKB repaired (no skb_mstamp) */
#define TCPCB_EVER_RETRANS      0x80    /* Ever retransmitted frame     */
#define TCPCB_RETRANS           (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
                                TCPCB_REPAIRED)

        __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
        /* 1 byte hole */
        __u32           ack_seq;        /* Sequence number ACK'd        */
        union {
                struct inet_skb_parm    h4;
#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
#endif
        } header;       /* For incoming frames          */
};

#define TCP_SKB_CB(__skb)       ((struct tcp_skb_cb *)&((__skb)->cb[0]))

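/* Usage sketch (illustrative, hypothetical helper): TCP reads its per-packet
 * state through the TCP_SKB_CB() accessor. For instance, the number of
 * units a segment occupies in sequence space (data bytes plus SYN/FIN)
 * is simply end_seq - seq.
 */
static inline u32 tcp_skb_seq_space_demo(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}
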
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->header.h6.iif;
}
#endif

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
        CA_EVENT_CWND_RESTART,  /* congestion window restart */
        CA_EVENT_COMPLETE_CWR,  /* end of congestion recovery */
        CA_EVENT_LOSS,          /* loss timeout */
        CA_EVENT_ECN_NO_CE,     /* ECT set, but not CE marked */
        CA_EVENT_ECN_IS_CE,     /* received CE marked IP packet */
        CA_EVENT_DELAYED_ACK,   /* Delayed ack is sent */
        CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
        CA_ACK_SLOWPATH         = (1 << 0),     /* In slow path processing */
        CA_ACK_WIN_UPDATE       = (1 << 1),     /* ACK updated window */
        CA_ACK_ECE              = (1 << 2),     /* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX      128
#define TCP_CA_BUF_MAX  (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC   0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN      0x2

union tcp_cc_info;

struct tcp_congestion_ops {
        struct list_head        list;
        u32 key;
        u32 flags;

        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
        /* cleanup private data (optional) */
        void (*release)(struct sock *sk);

        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
        /* do new cwnd calculation (required) */
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* call when ack arrives (optional) */
        void (*in_ack_event)(struct sock *sk, u32 flags);
        /* new value of cwnd after loss (optional) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
        /* get info for inet_diag (optional) */
        size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
                           union tcp_cc_info *info);

        char            name[TCP_CA_NAME_MAX];
        struct module   *owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

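/* Minimal sketch (illustrative only; "demo" and this helper are hypothetical,
 * not a real algorithm in this header): a congestion control module fills in
 * a tcp_congestion_ops with at least .ssthresh and .cong_avoid (here borrowed
 * from Reno), names it, and registers it from its init path.
 */
static inline int tcp_demo_cc_register_demo(void)
{
        static struct tcp_congestion_ops demo_ops = {
                .ssthresh   = tcp_reno_ssthresh,
                .cong_avoid = tcp_reno_cong_avoid,
                .name       = "demo",
        };

        return tcp_register_congestion_control(&demo_ops);
}
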
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
        return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->set_state)
                icsk->icsk_ca_ops->set_state(sk, ca_state);
        icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->cwnd_event)
                icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves with respect to SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
        return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
        tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        tp->do_early_retrans = sysctl_tcp_early_retrans &&
                sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
                net->ipv4.sysctl_tcp_reordering == 3;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
        tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
        return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *      "Packets sent once on transmission queue" MINUS
 *      "Packets left network, but not honestly ACKed yet" PLUS
 *      "Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH   0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
        return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
        return (TCPF_CA_CWR | TCPF_CA_Recovery) &
               (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_in_cwnd_reduction(sk))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
                           ((tp->snd_cwnd >> 1) +
                            (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
        return 3;
}

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
        return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
        return tp->snd_una + tp->snd_wnd;
}

1060/* We follow the spirit of RFC2861 to validate cwnd but implement a more
1061 * flexible approach. The RFC suggests cwnd should not be raised unless
Neal Cardwellca8a2262014-05-22 10:41:08 -04001062 * it was fully used previously. And that's exactly what we do in
1063 * congestion avoidance mode. But in slow start we allow cwnd to grow
1064 * as long as the application has used half the cwnd.
Eric Dumazete114a712014-04-30 11:58:13 -07001065 * Example :
1066 * cwnd is 10 (IW10), but application sends 9 frames.
1067 * We allow cwnd to reach 18 when all frames are ACKed.
1068 * This check is safe because it's as aggressive as slow start which already
 1069 * risks 100% overshoot. The advantage is that we discourage the application
 1070 * from sending filler packets or extra data just to blow up cwnd usage
 1071 * artificially, and allow an application-limited process to probe bandwidth more aggressively.
Eric Dumazete114a712014-04-30 11:58:13 -07001072 */
Eric Dumazet24901552014-05-02 21:18:05 -07001073static inline bool tcp_is_cwnd_limited(const struct sock *sk)
Eric Dumazete114a712014-04-30 11:58:13 -07001074{
1075 const struct tcp_sock *tp = tcp_sk(sk);
1076
Neal Cardwellca8a2262014-05-22 10:41:08 -04001077 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
Yuchung Cheng071d5082015-07-09 13:16:29 -07001078 if (tcp_in_slow_start(tp))
Neal Cardwellca8a2262014-05-22 10:41:08 -04001079 return tp->snd_cwnd < 2 * tp->max_packets_out;
1080
1081 return tp->is_cwnd_limited;
Eric Dumazete114a712014-04-30 11:58:13 -07001082}
Stephen Hemmingerf4805ed2005-11-10 16:53:30 -08001083
Eric Dumazet21c8fe92015-05-06 14:26:24 -07001084/* Something is really bad: we could not queue an additional packet,
 1085 * because the qdisc is full or the receiver sent a 0 window.
 1086 * We do not want to add fuel to the fire, or abort too early,
 1087 * so make sure the timer we arm now is at least 200ms in the future,
 1088 * regardless of the current icsk_rto value (as it could be ~2ms).
 1089 */
1090static inline unsigned long tcp_probe0_base(const struct sock *sk)
1091{
1092 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1093}
1094
1095/* Variant of inet_csk_rto_backoff() used for zero window probes */
1096static inline unsigned long tcp_probe0_when(const struct sock *sk,
1097 unsigned long max_when)
1098{
1099 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
1100
1101 return (unsigned long)min_t(u64, when, max_when);
1102}
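
/* Hypothetical usage sketch, not a kernel API: with tcp_probe0_base() at
 * its 200ms floor and icsk_backoff = 3, the unclamped timeout is
 * 200ms << 3 = 1.6s, which tcp_probe0_when() then clamps to max_when.
 */
static inline unsigned long example_probe0_timeout(const struct sock *sk)
{
	/* Cap the backed-off zero-window probe timer at TCP_RTO_MAX */
	return tcp_probe0_when(sk, TCP_RTO_MAX);
}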
1103
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001104static inline void tcp_check_probe_timer(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105{
Eric Dumazet21c8fe92015-05-06 14:26:24 -07001106 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
Arnaldo Carvalho de Melo3f421ba2005-08-09 20:11:08 -07001107 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
Eric Dumazet21c8fe92015-05-06 14:26:24 -07001108 tcp_probe0_base(sk), TCP_RTO_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109}
1110
Hantzis Fotisee7537b2009-03-02 22:42:02 -08001111static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112{
1113 tp->snd_wl1 = seq;
1114}
1115
Hantzis Fotisee7537b2009-03-02 22:42:02 -08001116static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117{
1118 tp->snd_wl1 = seq;
1119}
1120
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121/*
 1122 * Calculate (or check) the TCP checksum
1123 */
Frederik Deweerdtba7808e2007-02-04 20:15:27 -08001124static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1125 __be32 daddr, __wsum base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126{
 1127 return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1128}
1129
Al Virob51655b2006-11-14 21:40:42 -08001130static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131{
Herbert Xufb286bb2005-11-10 13:01:24 -08001132 return __skb_checksum_complete(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133}
1134
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001135static inline bool tcp_checksum_complete(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136{
Herbert Xu60476372007-04-09 11:59:39 -07001137 return !skb_csum_unnecessary(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 __tcp_checksum_complete(skb);
1139}
1140
1141/* Prequeue for VJ style copy to user, combined with checksumming. */
1142
Stephen Hemminger40efc6f2006-01-03 16:03:49 -08001143static inline void tcp_prequeue_init(struct tcp_sock *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144{
1145 tp->ucopy.task = NULL;
1146 tp->ucopy.len = 0;
1147 tp->ucopy.memory = 0;
1148 skb_queue_head_init(&tp->ucopy.prequeue);
1149}
1150
Joe Perches5c9f3022013-09-23 11:33:32 -07001151bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152
1153#undef STATE_TRACE
1154
1155#ifdef STATE_TRACE
 1156static const char *statename[] = {
 1157 "Unused", "Established", "Syn Sent", "Syn Recv",
 1158 "Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
 1159 "Close Wait", "Last ACK", "Listen", "Closing"
 1160};
1161#endif
Joe Perches5c9f3022013-09-23 11:33:32 -07001162void tcp_set_state(struct sock *sk, int state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
Joe Perches5c9f3022013-09-23 11:33:32 -07001164void tcp_done(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09001166int tcp_abort(struct sock *sk, int err);
1167
Stephen Hemminger40efc6f2006-01-03 16:03:49 -08001168static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169{
1170 rx_opt->dsack = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 rx_opt->num_sacks = 0;
1172}
1173
Joe Perches5c9f3022013-09-23 11:33:32 -07001174u32 tcp_default_init_rwnd(u32 mss);
Eric Dumazet6f021c62015-08-21 12:30:00 -07001175void tcp_cwnd_restart(struct sock *sk, s32 delta);
1176
1177static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1178{
1179 struct tcp_sock *tp = tcp_sk(sk);
1180 s32 delta;
1181
1182 if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
1183 return;
1184 delta = tcp_time_stamp - tp->lsndtime;
1185 if (delta > inet_csk(sk)->icsk_rto)
1186 tcp_cwnd_restart(sk, delta);
1187}
Yuchung Cheng85f16522013-06-11 15:35:32 -07001188
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189/* Determine a window scaling and initial window to offer. */
Joe Perches5c9f3022013-09-23 11:33:32 -07001190void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
1191 __u32 *window_clamp, int wscale_ok,
1192 __u8 *rcv_wscale, __u32 init_rcv_wnd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193
1194static inline int tcp_win_from_space(int space)
1195{
 1196 return sysctl_tcp_adv_win_scale <= 0 ?
 1197 (space >> (-sysctl_tcp_adv_win_scale)) :
 1198 space - (space >> sysctl_tcp_adv_win_scale);
1199}
1200
Kenjiro Nakayama105970f2014-10-20 18:15:50 +09001201/* Note: caller must be prepared to deal with negative returns */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202static inline int tcp_space(const struct sock *sk)
1203{
1204 return tcp_win_from_space(sk->sk_rcvbuf -
1205 atomic_read(&sk->sk_rmem_alloc));
Kenjiro Nakayama105970f2014-10-20 18:15:50 +09001206}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207
1208static inline int tcp_full_space(const struct sock *sk)
1209{
Kenjiro Nakayama105970f2014-10-20 18:15:50 +09001210 return tcp_win_from_space(sk->sk_rcvbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211}
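
/* Illustrative numbers, ours: with sysctl_tcp_adv_win_scale = 1,
 * tcp_win_from_space() reserves half of the space (space - space/2), so
 * sk_rcvbuf = 256 KB and sk_rmem_alloc = 64 KB give
 * tcp_space() = (256 - 64)/2 = 96 KB while tcp_full_space() = 128 KB.
 * A scale <= 0 shifts the other way: scale = -2 yields space >> 2.
 */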
1212
Yuchung Cheng843f4a52014-05-11 20:22:11 -07001213extern void tcp_openreq_init_rwin(struct request_sock *req,
Eric Dumazetb1964b52015-09-25 07:39:09 -07001214 const struct sock *sk_listener,
1215 const struct dst_entry *dst);
Yuchung Cheng843f4a52014-05-11 20:22:11 -07001216
Joe Perches5c9f3022013-09-23 11:33:32 -07001217void tcp_enter_memory_pressure(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1220{
Nikolay Borisovb840d152016-01-07 16:38:45 +02001221 struct net *net = sock_net((struct sock *)tp);
1222
1223 return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224}
1225
1226static inline int keepalive_time_when(const struct tcp_sock *tp)
1227{
Nikolay Borisov13b287e2016-01-07 16:38:43 +02001228 struct net *net = sock_net((struct sock *)tp);
1229
1230 return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231}
1232
Eric Dumazetdf19a622009-08-28 23:48:54 -07001233static inline int keepalive_probes(const struct tcp_sock *tp)
1234{
Nikolay Borisov9bd68612016-01-07 16:38:44 +02001235 struct net *net = sock_net((struct sock *)tp);
1236
1237 return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
Eric Dumazetdf19a622009-08-28 23:48:54 -07001238}
1239
Flavio Leitner6c37e5d2010-04-26 18:33:27 +00001240static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1241{
1242 const struct inet_connection_sock *icsk = &tp->inet_conn;
1243
1244 return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1245 tcp_time_stamp - tp->rcv_tstamp);
1246}
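
/* Hedged sketch of how these helpers combine; the real decision lives in
 * tcp_keepalive_timer() in net/ipv4/tcp_timer.c, and the name below is
 * ours, not a kernel API.
 */
static inline bool example_keepalive_probe_due(const struct tcp_sock *tp)
{
	/* Probe once the idle time reaches the configured keepalive time */
	return keepalive_time_elapsed(tp) >= keepalive_time_when(tp);
}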
1247
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001248static inline int tcp_fin_time(const struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249{
Nikolay Borisov1e579ca2016-02-03 09:46:56 +02001250 int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001251 const int rto = inet_csk(sk)->icsk_rto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001253 if (fin_timeout < (rto << 2) - (rto >> 1))
1254 fin_timeout = (rto << 2) - (rto >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
1256 return fin_timeout;
1257}
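
/* Worked example, ours: with linger2 unset and the per-netns
 * sysctl_tcp_fin_timeout at its usual 60s, an rto of 200ms gives
 * (rto << 2) - (rto >> 1) = 800ms - 100ms = 700ms, so tcp_fin_time()
 * returns the full 60s; only a very small configured fin_timeout would
 * be clamped up to 3.5 * rto.
 */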
1258
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001259static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1260 int paws_win)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Ilpo Järvinenc887e6d2009-03-14 14:23:03 +00001262 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001263 return true;
Ilpo Järvinenc887e6d2009-03-14 14:23:03 +00001264 if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001265 return true;
Eric Dumazetbc2ce892010-12-16 14:08:34 -08001266 /*
 1267 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 1268 * while the TCP messages that follow carry valid values. Ignore a 0 value,
 1269 * or else a 'negative' tsval might forbid us from accepting their packets.
1270 */
1271 if (!rx_opt->ts_recent)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001272 return true;
1273 return false;
Ilpo Järvinenc887e6d2009-03-14 14:23:03 +00001274}
1275
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001276static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1277 int rst)
Ilpo Järvinenc887e6d2009-03-14 14:23:03 +00001278{
1279 if (tcp_paws_check(rx_opt, 0))
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001280 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281
 1282 /* RST segments are not recommended to carry a timestamp,
 1283 and, if they do, it is recommended to ignore PAWS because
 1284 "their cleanup function should take precedence over timestamps."
 1285 Certainly, this is a mistake. It is necessary to understand the reasons
 1286 for this constraint in order to relax it: if a peer reboots, its clock may go
 1287 out of sync and half-open connections will not be reset.
 1288 Actually, the problem would not exist if all
 1289 the implementations followed the draft about maintaining clock
 1290 state across reboots. Linux-2.2 DOES NOT!
 1291
 1292 However, we can relax the time bounds for RST segments to MSL.
 1293 */
James Morris9d729f72007-03-04 16:12:44 -08001294 if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001295 return false;
1296 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297}
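
/* Illustrative wraparound case, ours: the signed 32-bit comparison in
 * tcp_paws_check() copes with timestamp wrap. With ts_recent = 0xfffffff0
 * and rcv_tsval = 0x00000010, (s32)(0xfffffff0 - 0x10) = -32 <= paws_win,
 * so the newer-but-wrapped timestamp still passes PAWS.
 */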
1298
Eric Dumazet7970ddc2015-03-16 21:06:20 -07001299bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1300 int mib_idx, u32 *last_oow_ack_time);
Neal Cardwell032ee422015-02-06 16:04:38 -05001301
Pavel Emelyanova9c193292008-07-16 20:21:42 -07001302static inline void tcp_mib_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303{
1304 /* See RFC 2012 */
Pavel Emelyanovcf1100a2008-07-16 20:27:38 -07001305 TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
1306 TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1307 TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1308 TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309}
1310
Ilpo Järvinen5af4ec22007-09-20 11:30:48 -07001311/* from STCP */
Ilpo Järvinenef9da472008-09-20 21:25:15 -07001312static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
David S. Miller0800f172007-09-20 11:40:37 -07001313{
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08001314 tp->lost_skb_hint = NULL;
Ilpo Järvinenef9da472008-09-20 21:25:15 -07001315}
1316
1317static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1318{
1319 tcp_clear_retrans_hints_partial(tp);
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08001320 tp->retransmit_skb_hint = NULL;
Ilpo Järvinenb7689202007-09-20 11:37:19 -07001321}
1322
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001323/* MD5 Signature */
1324struct crypto_hash;
1325
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001326union tcp_md5_addr {
1327 struct in_addr a4;
1328#if IS_ENABLED(CONFIG_IPV6)
1329 struct in6_addr a6;
1330#endif
1331};
1332
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001333/* - key database */
1334struct tcp_md5sig_key {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001335 struct hlist_node node;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001336 u8 keylen;
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001337 u8 family; /* AF_INET or AF_INET6 */
1338 union tcp_md5_addr addr;
1339 u8 key[TCP_MD5SIG_MAXKEYLEN];
1340 struct rcu_head rcu;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001341};
1342
1343/* - sock block */
1344struct tcp_md5sig_info {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001345 struct hlist_head head;
Eric Dumazeta8afca02012-01-31 18:45:40 +00001346 struct rcu_head rcu;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001347};
1348
1349/* - pseudo header */
1350struct tcp4_pseudohdr {
1351 __be32 saddr;
1352 __be32 daddr;
1353 __u8 pad;
1354 __u8 protocol;
1355 __be16 len;
1356};
1357
1358struct tcp6_pseudohdr {
1359 struct in6_addr saddr;
1360 struct in6_addr daddr;
1361 __be32 len;
1362 __be32 protocol; /* including padding */
1363};
1364
1365union tcp_md5sum_block {
1366 struct tcp4_pseudohdr ip4;
Eric Dumazetdfd56b82011-12-10 09:48:31 +00001367#if IS_ENABLED(CONFIG_IPV6)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001368 struct tcp6_pseudohdr ip6;
1369#endif
1370};
1371
1372/* - pool: digest algorithm, hash description and scratch buffer */
1373struct tcp_md5sig_pool {
1374 struct hash_desc md5_desc;
1375 union tcp_md5sum_block md5_blk;
1376};
1377
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001378/* - functions */
Eric Dumazet39f8e582015-03-24 15:58:55 -07001379int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1380 const struct sock *sk, const struct sk_buff *skb);
Joe Perches5c9f3022013-09-23 11:33:32 -07001381int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1382 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
1383int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1384 int family);
Eric Dumazetb83e3de2015-09-25 07:39:15 -07001385struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001386 const struct sock *addr_sk);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001387
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +09001388#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetb83e3de2015-09-25 07:39:15 -07001389struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
Joe Perches5c9f3022013-09-23 11:33:32 -07001390 const union tcp_md5_addr *addr,
1391 int family);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001392#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +09001393#else
Eric Dumazetb83e3de2015-09-25 07:39:15 -07001394static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001395 const union tcp_md5_addr *addr,
1396 int family)
1397{
1398 return NULL;
1399}
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +09001400#define tcp_twsk_md5_key(twsk) NULL
1401#endif
1402
Joe Perches5c9f3022013-09-23 11:33:32 -07001403bool tcp_alloc_md5sig_pool(void);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001404
Joe Perches5c9f3022013-09-23 11:33:32 -07001405struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
Eric Dumazet71cea172013-05-20 06:52:26 +00001406static inline void tcp_put_md5sig_pool(void)
1407{
1408 local_bh_enable();
1409}
Eric Dumazet35790c02010-05-16 00:34:04 -07001410
Joe Perches5c9f3022013-09-23 11:33:32 -07001411int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1412int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1413 unsigned int header_len);
1414int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1415 const struct tcp_md5sig_key *key);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001416
Jerry Chu10467162012-08-31 12:29:11 +00001417/* From tcp_fastopen.c */
Joe Perches5c9f3022013-09-23 11:33:32 -07001418void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1419 struct tcp_fastopen_cookie *cookie, int *syn_loss,
1420 unsigned long *last_syn_loss);
1421void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
Daniel Lee2646c832015-04-06 14:37:27 -07001422 struct tcp_fastopen_cookie *cookie, bool syn_lost,
1423 u16 try_exp);
Yuchung Cheng783237e2012-07-19 06:43:07 +00001424struct tcp_fastopen_request {
1425 /* Fast Open cookie. Size 0 means a cookie request */
1426 struct tcp_fastopen_cookie cookie;
1427 struct msghdr *data; /* data in MSG_FASTOPEN */
Eric Dumazetf5ddcbb2014-02-20 10:09:18 -08001428 size_t size;
1429 int copied; /* queued in tcp_connect() */
Yuchung Cheng783237e2012-07-19 06:43:07 +00001430};
Yuchung Cheng783237e2012-07-19 06:43:07 +00001431void tcp_free_fastopen_req(struct tcp_sock *tp);
1432
Jerry Chu10467162012-08-31 12:29:11 +00001433extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1434int tcp_fastopen_reset_cipher(void *key, unsigned int len);
Eric Dumazet61d2bca2016-02-01 21:03:07 -08001435void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
Eric Dumazet7c85af82015-09-24 17:16:05 -07001436struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1437 struct request_sock *req,
1438 struct tcp_fastopen_cookie *foc,
1439 struct dst_entry *dst);
Hannes Frederic Sowa222e83d2013-10-19 21:48:58 +02001440void tcp_fastopen_init_key_once(bool publish);
Jerry Chu10467162012-08-31 12:29:11 +00001441#define TCP_FASTOPEN_KEY_LENGTH 16
1442
1443/* Fastopen key context */
1444struct tcp_fastopen_context {
Eric Dumazet7ae86392013-06-25 01:21:06 -07001445 struct crypto_cipher *tfm;
1446 __u8 key[TCP_FASTOPEN_KEY_LENGTH];
1447 struct rcu_head rcu;
Jerry Chu10467162012-08-31 12:29:11 +00001448};
1449
David S. Millerfe067e82007-03-07 12:12:44 -08001450/* write queue abstraction */
1451static inline void tcp_write_queue_purge(struct sock *sk)
1452{
1453 struct sk_buff *skb;
1454
1455 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001456 sk_wmem_free_skb(sk, skb);
1457 sk_mem_reclaim(sk);
Ilpo Järvinen8818a9d2009-12-02 22:24:02 -08001458 tcp_clear_all_retrans_hints(tcp_sk(sk));
David S. Millerfe067e82007-03-07 12:12:44 -08001459}
1460
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001461static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
David S. Millerfe067e82007-03-07 12:12:44 -08001462{
David S. Millercd07a8e2008-09-23 00:50:13 -07001463 return skb_peek(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001464}
1465
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001466static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
David S. Millerfe067e82007-03-07 12:12:44 -08001467{
David S. Millercd07a8e2008-09-23 00:50:13 -07001468 return skb_peek_tail(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001469}
1470
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001471static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1472 const struct sk_buff *skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001473{
David S. Millercd07a8e2008-09-23 00:50:13 -07001474 return skb_queue_next(&sk->sk_write_queue, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001475}
1476
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001477static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1478 const struct sk_buff *skb)
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001479{
1480 return skb_queue_prev(&sk->sk_write_queue, skb);
1481}
1482
David S. Millerfe067e82007-03-07 12:12:44 -08001483#define tcp_for_write_queue(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001484 skb_queue_walk(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001485
1486#define tcp_for_write_queue_from(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001487 skb_queue_walk_from(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001488
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001489#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001490 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001491
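/* Hypothetical usage sketch of the walkers above, not a kernel API: count
 * the skbs currently on the write queue. The caller is assumed to hold
 * the socket lock; the 'example_' name is ours.
 */
static inline int example_write_queue_len(struct sock *sk)
{
	struct sk_buff *skb;
	int n = 0;

	tcp_for_write_queue(skb, sk)
		n++;
	return n;
}
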
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001492static inline struct sk_buff *tcp_send_head(const struct sock *sk)
David S. Millerfe067e82007-03-07 12:12:44 -08001493{
1494 return sk->sk_send_head;
1495}
1496
David S. Millercd07a8e2008-09-23 00:50:13 -07001497static inline bool tcp_skb_is_last(const struct sock *sk,
1498 const struct sk_buff *skb)
1499{
1500 return skb_queue_is_last(&sk->sk_write_queue, skb);
1501}
1502
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001503static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001504{
David S. Millercd07a8e2008-09-23 00:50:13 -07001505 if (tcp_skb_is_last(sk, skb))
David S. Millerfe067e82007-03-07 12:12:44 -08001506 sk->sk_send_head = NULL;
David S. Millercd07a8e2008-09-23 00:50:13 -07001507 else
1508 sk->sk_send_head = tcp_write_queue_next(sk, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001509}
1510
1511static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1512{
1513 if (sk->sk_send_head == skb_unlinked)
1514 sk->sk_send_head = NULL;
1515}
1516
1517static inline void tcp_init_send_head(struct sock *sk)
1518{
1519 sk->sk_send_head = NULL;
1520}
1521
1522static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1523{
1524 __skb_queue_tail(&sk->sk_write_queue, skb);
1525}
1526
1527static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1528{
1529 __tcp_add_write_queue_tail(sk, skb);
1530
1531 /* Queue it, remembering where we must start sending. */
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001532 if (sk->sk_send_head == NULL) {
David S. Millerfe067e82007-03-07 12:12:44 -08001533 sk->sk_send_head = skb;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001534
1535 if (tcp_sk(sk)->highest_sack == NULL)
1536 tcp_sk(sk)->highest_sack = skb;
1537 }
David S. Millerfe067e82007-03-07 12:12:44 -08001538}
1539
1540static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1541{
1542 __skb_queue_head(&sk->sk_write_queue, skb);
1543}
1544
1545/* Insert buff after skb on the write queue of sk. */
1546static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1547 struct sk_buff *buff,
1548 struct sock *sk)
1549{
Gerrit Renker7de6c032008-04-14 00:05:09 -07001550 __skb_queue_after(&sk->sk_write_queue, skb, buff);
David S. Millerfe067e82007-03-07 12:12:44 -08001551}
1552
David S. Miller43f59c82008-09-21 21:28:51 -07001553/* Insert new before skb on the write queue of sk. */
David S. Millerfe067e82007-03-07 12:12:44 -08001554static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1555 struct sk_buff *skb,
1556 struct sock *sk)
1557{
David S. Miller43f59c82008-09-21 21:28:51 -07001558 __skb_queue_before(&sk->sk_write_queue, skb, new);
Ilpo Järvinen6e421412007-11-19 23:24:09 -08001559
1560 if (sk->sk_send_head == skb)
1561 sk->sk_send_head = new;
David S. Millerfe067e82007-03-07 12:12:44 -08001562}
1563
1564static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1565{
1566 __skb_unlink(skb, &sk->sk_write_queue);
1567}
1568
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001569static inline bool tcp_write_queue_empty(struct sock *sk)
David S. Millerfe067e82007-03-07 12:12:44 -08001570{
1571 return skb_queue_empty(&sk->sk_write_queue);
1572}
1573
Krishna Kumar12d50c42009-12-08 22:26:13 +00001574static inline void tcp_push_pending_frames(struct sock *sk)
1575{
1576 if (tcp_send_head(sk)) {
1577 struct tcp_sock *tp = tcp_sk(sk);
1578
1579 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1580 }
1581}
1582
Neal Cardwellecb97192012-02-27 17:52:52 -05001583/* Start sequence of the skb just after the highest skb with the SACKed
 1584 * bit set; valid only if sacked_out > 0 or when the caller has ensured
 1585 * validity itself.
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001586 */
1587static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1588{
1589 if (!tp->sacked_out)
1590 return tp->snd_una;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001591
1592 if (tp->highest_sack == NULL)
1593 return tp->snd_nxt;
1594
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001595 return TCP_SKB_CB(tp->highest_sack)->seq;
1596}
1597
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001598static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1599{
1600 tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1601 tcp_write_queue_next(sk, skb);
1602}
1603
1604static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1605{
1606 return tcp_sk(sk)->highest_sack;
1607}
1608
1609static inline void tcp_highest_sack_reset(struct sock *sk)
1610{
1611 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1612}
1613
1614/* Called when old skb is about to be deleted (to be combined with new skb) */
1615static inline void tcp_highest_sack_combine(struct sock *sk,
1616 struct sk_buff *old,
1617 struct sk_buff *new)
1618{
1619 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1620 tcp_sk(sk)->highest_sack = new;
1621}
1622
Florian Westphalb1f0a0e2015-12-21 21:29:24 +01001623/* This helper checks whether the socket has IP_TRANSPARENT set */
1624static inline bool inet_sk_transparent(const struct sock *sk)
1625{
1626 switch (sk->sk_state) {
1627 case TCP_TIME_WAIT:
1628 return inet_twsk(sk)->tw_transparent;
1629 case TCP_NEW_SYN_RECV:
1630 return inet_rsk(inet_reqsk(sk))->no_srccheck;
1631 }
1632 return inet_sk(sk)->transparent;
1633}
1634
Andreas Petlund5aa4b322010-02-18 02:45:45 +00001635/* Determines whether this is a thin stream (which may suffer from
1636 * increased latency). Used to trigger latency-reducing mechanisms.
1637 */
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001638static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
Andreas Petlund5aa4b322010-02-18 02:45:45 +00001639{
1640 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1641}
1642
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643/* /proc */
1644enum tcp_seq_states {
1645 TCP_SEQ_STATE_LISTENING,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 TCP_SEQ_STATE_ESTABLISHED,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647};
1648
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001649int tcp_seq_open(struct inode *inode, struct file *file);
1650
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651struct tcp_seq_afinfo {
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001652 char *name;
1653 sa_family_t family;
1654 const struct file_operations *seq_fops;
1655 struct seq_operations seq_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656};
1657
1658struct tcp_iter_state {
Denis V. Luneva4146b12008-04-13 22:11:14 -07001659 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 sa_family_t family;
1661 enum tcp_seq_states state;
1662 struct sock *syn_wait_sk;
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001663 int bucket, offset, sbucket, num;
Tom Herberta8b690f2010-06-07 00:43:42 -07001664 loff_t last_pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665};
1666
Joe Perches5c9f3022013-09-23 11:33:32 -07001667int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1668void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001670extern struct request_sock_ops tcp_request_sock_ops;
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001671extern struct request_sock_ops tcp6_request_sock_ops;
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001672
Joe Perches5c9f3022013-09-23 11:33:32 -07001673void tcp_v4_destroy_sock(struct sock *sk);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001674
Eric Dumazet28be6e02013-10-18 10:36:17 -07001675struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
Joe Perches5c9f3022013-09-23 11:33:32 -07001676 netdev_features_t features);
1677struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1678int tcp_gro_complete(struct sk_buff *skb);
Daniel Borkmann28850dc2013-06-07 05:11:46 +00001679
Joe Perches5c9f3022013-09-23 11:33:32 -07001680void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
Herbert Xuf4c50d92006-06-22 03:02:40 -07001681
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001682static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1683{
Nikolay Borisov4979f2d2016-02-03 09:46:57 +02001684 struct net *net = sock_net((struct sock *)tp);
1685 return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07001686}
1687
1688static inline bool tcp_stream_memory_free(const struct sock *sk)
1689{
1690 const struct tcp_sock *tp = tcp_sk(sk);
1691 u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1692
1693 return notsent_bytes < tcp_notsent_lowat(tp);
1694}
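
/* Illustrative numbers, ours: with write_seq - snd_nxt = 96 KB of unsent
 * data and tcp_notsent_lowat() = 128 KB, tcp_stream_memory_free() is true
 * and the socket can still be reported writable; once the unsent backlog
 * reaches the lowat value the stream is considered full.
 */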
1695
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001696#ifdef CONFIG_PROC_FS
Joe Perches5c9f3022013-09-23 11:33:32 -07001697int tcp4_proc_init(void);
1698void tcp4_proc_exit(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001699#endif
1700
Eric Dumazetea3bea32015-09-25 07:39:23 -07001701int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001702int tcp_conn_request(struct request_sock_ops *rsk_ops,
1703 const struct tcp_request_sock_ops *af_ops,
1704 struct sock *sk, struct sk_buff *skb);
Octavian Purdila5db92c92014-06-25 17:09:59 +03001705
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001706/* TCP af-specific functions */
1707struct tcp_sock_af_ops {
1708#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetb83e3de2015-09-25 07:39:15 -07001709 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001710 const struct sock *addr_sk);
Eric Dumazet39f8e582015-03-24 15:58:55 -07001711 int (*calc_md5_hash)(char *location,
1712 const struct tcp_md5sig_key *md5,
1713 const struct sock *sk,
1714 const struct sk_buff *skb);
1715 int (*md5_parse)(struct sock *sk,
1716 char __user *optval,
1717 int optlen);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001718#endif
1719};
1720
1721struct tcp_request_sock_ops {
Octavian Purdila2aec4a22014-06-25 17:10:00 +03001722 u16 mss_clamp;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001723#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetb83e3de2015-09-25 07:39:15 -07001724 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001725 const struct sock *addr_sk);
Eric Dumazet39f8e582015-03-24 15:58:55 -07001726 int (*calc_md5_hash) (char *location,
1727 const struct tcp_md5sig_key *md5,
1728 const struct sock *sk,
1729 const struct sk_buff *skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001730#endif
Eric Dumazetb40cf182015-09-25 07:39:08 -07001731 void (*init_req)(struct request_sock *req,
1732 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +03001733 struct sk_buff *skb);
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001734#ifdef CONFIG_SYN_COOKIES
Eric Dumazet3f684b42015-09-29 07:42:49 -07001735 __u32 (*cookie_init_seq)(const struct sk_buff *skb,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001736 __u16 *mss);
1737#endif
Eric Dumazetf9646292015-09-29 07:42:50 -07001738 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
Octavian Purdilad94e0412014-06-25 17:09:55 +03001739 const struct request_sock *req,
1740 bool *strict);
Octavian Purdila936b8bd2014-06-25 17:09:57 +03001741 __u32 (*init_seq)(const struct sk_buff *skb);
Eric Dumazet0f935dbe2015-09-25 07:39:21 -07001742 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
Octavian Purdilad6274bd2014-06-25 17:09:58 +03001743 struct flowi *fl, struct request_sock *req,
Eric Dumazetdc6ef6b2015-10-16 13:00:01 -07001744 struct tcp_fastopen_cookie *foc,
Eric Dumazetca6fb062015-10-02 11:43:35 -07001745 bool attach_req);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001746};
1747
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001748#ifdef CONFIG_SYN_COOKIES
1749static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
Eric Dumazet3f684b42015-09-29 07:42:49 -07001750 const struct sock *sk, struct sk_buff *skb,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001751 __u16 *mss)
1752{
Eric Dumazet3f684b42015-09-29 07:42:49 -07001753 tcp_synq_overflow(sk);
1754 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
1755 return ops->cookie_init_seq(skb, mss);
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001756}
1757#else
1758static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
Eric Dumazet3f684b42015-09-29 07:42:49 -07001759 const struct sock *sk, struct sk_buff *skb,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001760 __u16 *mss)
1761{
1762 return 0;
1763}
1764#endif
1765
Joe Perches5c9f3022013-09-23 11:33:32 -07001766int tcpv4_offload_init(void);
Daniel Borkmann28850dc2013-06-07 05:11:46 +00001767
Joe Perches5c9f3022013-09-23 11:33:32 -07001768void tcp_v4_init(void);
1769void tcp_init(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001770
Yuchung Cheng659a8ad2015-10-16 21:57:46 -07001771/* tcp_recovery.c */
1772
Yuchung Cheng4f41b1c2015-10-16 21:57:47 -07001773/* Flags to enable various loss recovery features. See below */
1774extern int sysctl_tcp_recovery;
1775
1776/* Use TCP RACK to detect (some) tail and retransmit losses */
1777#define TCP_RACK_LOST_RETRANS 0x1
1778
1779extern int tcp_rack_mark_lost(struct sock *sk);
1780
Yuchung Cheng659a8ad2015-10-16 21:57:46 -07001781extern void tcp_rack_advance(struct tcp_sock *tp,
1782 const struct skb_mstamp *xmit_time, u8 sacked);
1783
Cong Wange25f8662014-10-15 14:33:21 -07001784/*
 1785 * Save and compile IPv4 options and return a pointer to them
1786 */
1787static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1788{
1789 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1790 struct ip_options_rcu *dopt = NULL;
1791
Cong Wang461b74c2014-10-15 14:33:22 -07001792 if (opt->optlen) {
Cong Wange25f8662014-10-15 14:33:21 -07001793 int opt_size = sizeof(*dopt) + opt->optlen;
1794
1795 dopt = kmalloc(opt_size, GFP_ATOMIC);
1796 if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1797 kfree(dopt);
1798 dopt = NULL;
1799 }
1800 }
1801 return dopt;
1802}
1803
Eric Dumazet98781962015-02-03 18:31:53 -08001804/* locally generated TCP pure ACKs have skb->truesize == 2
1805 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
1806 * This is much faster than dissecting the packet to find out.
1807 * (Think of GRE encapsulations, IPv4, IPv6, ...)
1808 */
1809static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
1810{
1811 return skb->truesize == 2;
1812}
1813
1814static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
1815{
1816 skb->truesize = 2;
1817}
1818
Tom Herbert473bd232016-03-07 14:11:05 -08001819static inline int tcp_inq(struct sock *sk)
1820{
1821 struct tcp_sock *tp = tcp_sk(sk);
1822 int answ;
1823
1824 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1825 answ = 0;
1826 } else if (sock_flag(sk, SOCK_URGINLINE) ||
1827 !tp->urg_data ||
1828 before(tp->urg_seq, tp->copied_seq) ||
1829 !before(tp->urg_seq, tp->rcv_nxt)) {
1830
1831 answ = tp->rcv_nxt - tp->copied_seq;
1832
1833 /* Subtract 1, if FIN was received */
1834 if (answ && sock_flag(sk, SOCK_DONE))
1835 answ--;
1836 } else {
1837 answ = tp->urg_seq - tp->copied_seq;
1838 }
1839
1840 return answ;
1841}
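
/* Hedged usage sketch, ours: a SIOCINQ-style caller would take the socket
 * lock around tcp_inq(); lock_sock()/release_sock() are the usual kernel
 * primitives, while the wrapper name below is hypothetical.
 */
static inline int example_tcp_bytes_readable(struct sock *sk)
{
	int answ;

	lock_sock(sk);
	answ = tcp_inq(sk);
	release_sock(sk);
	return answ;
}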
1842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843#endif /* _TCP_H */