/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Offer an initial receive window of 10 mss. */
#define TCP_DEFAULT_INIT_RCVWND	10

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum */

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds */
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC2988bis initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 * TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 * TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND		10

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)
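
/*
 * Illustrative note (added for clarity, not part of the original header):
 * these comparisons survive sequence-number wraparound because the
 * subtraction is done modulo 2^32 and the result is then read as signed.
 * For example, before(0xfffffff0, 0x10) is true: 0xfffffff0 - 0x00000010
 * is 0xffffffe0, which as __s32 is -32, i.e. negative, so 0xfffffff0 is
 * treated as the "earlier" sequence number even though it is numerically
 * larger.
 */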

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}

	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
		return true;
	return false;
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
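
/*
 * Usage sketch (illustrative, not from the original file): the macros take
 * the per-netns MIB and an SNMP field id, e.g. from process context
 *
 *	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
 *
 * while the _BH variants are meant for softirq/bottom-half context.
 */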

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
extern void *tcp_v4_tw_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define TCP_ECN_OK		1
#define TCP_ECN_QUEUE_CWR	2
#define TCP_ECN_DEMAND_CWR	4
#define TCP_ECN_SEEN		8

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, u8 **hvpp,
			      int estab);
extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
					    struct sk_buff *skb,
					    __u16 *mss)
{
	return 0;
}
#endif
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern int tcp_syn_flood_action(struct sock *sk,
				const struct sk_buff *skb,
				const char *proto);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
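
/*
 * Worked example (illustrative, not part of the original header): with
 * tp->max_window = 65535 the cutoff is 32767, so a 64KB TSO chunk is bounded
 * to 32767 bytes while a 1460-byte MSS segment passes through unchanged.
 * The max_t() guard keeps the result no smaller than 68 bytes minus the TCP
 * header length (68 being the minimum IPv4 MTU a host must accept), so a
 * peer advertising a tiny window cannot force absurdly small segments.
 */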

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
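
/*
 * Worked example (illustrative, not part of the original header): if
 * rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1200, the advertised window
 * edge is 1500 and data up to 1200 has been received, so 300 bytes of
 * window remain. Had the peer pushed past the edge (rcv_nxt > 1500), the
 * signed intermediate would go negative and the result is clamped to 0.
 */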

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames */
	__u32		seq;		/* Starting sequence number */
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen */
	__u32		when;		/* used to compute rtt's */
	__u8		flags;		/* TCP header flags. */
	__u8		sacked;		/* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block */
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted */
#define TCPCB_LOST		0x04	/* SKB is lost */
#define TCPCB_TAGBITS		0x07	/* All tag bits */

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame */
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd */
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
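
/*
 * Usage sketch (illustrative, not part of the original header): the control
 * block lives in skb->cb[] and is normally reached through the macro, e.g.
 *
 *	if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
 *		;	/* segment carries data beyond what we have received */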

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
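
/*
 * Minimal registration sketch (illustrative only, not part of the original
 * header; the "example" name is hypothetical): a congestion control module
 * fills in the required callbacks and registers its ops at module init.
 * The Reno helpers declared above can serve as stand-in callbacks.
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 */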

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
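
/*
 * Worked example (illustrative, not part of the original header): with
 * packets_out = 10, sacked_out = 2, lost_out = 1 and retrans_out = 1,
 * tcp_left_out() is 3 and the in-flight estimate is 10 - 3 + 1 = 8 packets
 * still believed to be in the network.
 */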

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
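
/*
 * Worked example (illustrative, not part of the original header): assuming
 * sysctl_tcp_adv_win_scale = 2 (its long-standing default), a space of
 * 65536 bytes yields 65536 - 65536/4 = 49152 bytes of advertised window;
 * the remaining quarter is reserved as application/metadata overhead.
 */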

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * then following tcp messages have valid values. Ignore 0 value,
	 * or else 'negative' tsval might forbid us to accept their packets.
	 */
	if (!rx_opt->ts_recent)
		return 1;
	return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
1098
Pavel Emelyanova9c193292008-07-16 20:21:42 -07001099static inline void tcp_mib_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100{
1101 /* See RFC 2012 */
Pavel Emelyanovcf1100a2008-07-16 20:27:38 -07001102 TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
1103 TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1104 TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1105 TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106}
1107
Ilpo Järvinen5af4ec22007-09-20 11:30:48 -07001108/* from STCP */
Ilpo Järvinenef9da472008-09-20 21:25:15 -07001109static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
David S. Miller0800f172007-09-20 11:40:37 -07001110{
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08001111 tp->lost_skb_hint = NULL;
1112 tp->scoreboard_skb_hint = NULL;
Ilpo Järvinenef9da472008-09-20 21:25:15 -07001113}
1114
1115static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1116{
1117 tcp_clear_retrans_hints_partial(tp);
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08001118 tp->retransmit_skb_hint = NULL;
Ilpo Järvinenb7689202007-09-20 11:37:19 -07001119}
1120
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001121/* MD5 Signature */
1122struct crypto_hash;
1123
1124/* - key database */
1125struct tcp_md5sig_key {
1126 u8 *key;
1127 u8 keylen;
1128};
1129
1130struct tcp4_md5sig_key {
David S. Millerf8ab18d2007-09-28 15:18:35 -07001131 struct tcp_md5sig_key base;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001132 __be32 addr;
1133};
1134
1135struct tcp6_md5sig_key {
David S. Millerf8ab18d2007-09-28 15:18:35 -07001136 struct tcp_md5sig_key base;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001137#if 0
1138 u32 scope_id; /* XXX */
1139#endif
1140 struct in6_addr addr;
1141};
1142
1143/* - sock block */
1144struct tcp_md5sig_info {
1145 struct tcp4_md5sig_key *keys4;
1146#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1147 struct tcp6_md5sig_key *keys6;
1148 u32 entries6;
1149 u32 alloced6;
1150#endif
1151 u32 entries4;
1152 u32 alloced4;
1153};
1154
1155/* - pseudo header */
1156struct tcp4_pseudohdr {
1157 __be32 saddr;
1158 __be32 daddr;
1159 __u8 pad;
1160 __u8 protocol;
1161 __be16 len;
1162};
1163
1164struct tcp6_pseudohdr {
1165 struct in6_addr saddr;
1166 struct in6_addr daddr;
1167 __be32 len;
1168 __be32 protocol; /* including padding */
1169};
1170
1171union tcp_md5sum_block {
1172 struct tcp4_pseudohdr ip4;
1173#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1174 struct tcp6_pseudohdr ip6;
1175#endif
1176};
1177
1178/* - pool: digest algorithm, hash description and scratch buffer */
1179struct tcp_md5sig_pool {
1180 struct hash_desc md5_desc;
1181 union tcp_md5sum_block md5_blk;
1182};
1183
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001184/* - functions */
Changli Gao53d31762010-07-10 20:41:06 +00001185extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1186 struct sock *sk, struct request_sock *req,
1187 struct sk_buff *skb);
1188extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
1189 struct sock *addr_sk);
1190extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
1191 u8 newkeylen);
1192extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001193
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +09001194#ifdef CONFIG_TCP_MD5SIG
1195#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_keylen ? \
1196 &(struct tcp_md5sig_key) { \
1197 .key = (twsk)->tw_md5_key, \
1198 .keylen = (twsk)->tw_md5_keylen, \
1199 } : NULL)
1200#else
1201#define tcp_twsk_md5_key(twsk) NULL
1202#endif
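
/*
 * The non-NULL variant above uses a C99 compound literal to build a
 * temporary struct tcp_md5sig_key pointing at the key material stored
 * in the timewait sock, so callers get a struct tcp_md5sig_key *
 * without any allocation.
 */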
1203
Eric Dumazet765cf992011-09-12 20:28:37 +00001204extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
Changli Gao53d31762010-07-10 20:41:06 +00001205extern void tcp_free_md5sig_pool(void);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001206
Eric Dumazet35790c02010-05-16 00:34:04 -07001207extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
Changli Gao53d31762010-07-10 20:41:06 +00001208extern void tcp_put_md5sig_pool(void);
Eric Dumazet35790c02010-05-16 00:34:04 -07001209
Adam Langley49a72df2008-07-19 00:01:42 -07001210extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
1211extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
1212 unsigned header_len);
1213extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1214 struct tcp_md5sig_key *key);
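
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the per-cpu pool is used in a get/put pair around the hashing helpers
 * declared above, roughly as the real signature code does.
 */
#if 0	/* example only, never compiled */
static int example_md5_hash_key_only(u8 *md5_hash, struct tcp_md5sig_key *key)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (!hp)
		return 1;	/* no pool available */
	if (crypto_hash_init(&hp->md5_desc))
		goto out_err;
	if (tcp_md5_hash_key(hp, key))
		goto out_err;
	if (crypto_hash_final(&hp->md5_desc, md5_hash))
		goto out_err;
	tcp_put_md5sig_pool();
	return 0;

out_err:
	tcp_put_md5sig_pool();
	return 1;
}
#endif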
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001215
David S. Millerfe067e82007-03-07 12:12:44 -08001216/* write queue abstraction */
1217static inline void tcp_write_queue_purge(struct sock *sk)
1218{
1219 struct sk_buff *skb;
1220
1221 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001222 sk_wmem_free_skb(sk, skb);
1223 sk_mem_reclaim(sk);
Ilpo Järvinen8818a9d2009-12-02 22:24:02 -08001224 tcp_clear_all_retrans_hints(tcp_sk(sk));
David S. Millerfe067e82007-03-07 12:12:44 -08001225}
1226
1227static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
1228{
David S. Millercd07a8e2008-09-23 00:50:13 -07001229 return skb_peek(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001230}
1231
1232static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
1233{
David S. Millercd07a8e2008-09-23 00:50:13 -07001234 return skb_peek_tail(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001235}
1236
1237static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
1238{
David S. Millercd07a8e2008-09-23 00:50:13 -07001239 return skb_queue_next(&sk->sk_write_queue, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001240}
1241
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001242static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
1243{
1244 return skb_queue_prev(&sk->sk_write_queue, skb);
1245}
1246
David S. Millerfe067e82007-03-07 12:12:44 -08001247#define tcp_for_write_queue(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001248 skb_queue_walk(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001249
1250#define tcp_for_write_queue_from(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001251 skb_queue_walk_from(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001252
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001253#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001254 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001255
David S. Millerfe067e82007-03-07 12:12:44 -08001256static inline struct sk_buff *tcp_send_head(struct sock *sk)
1257{
1258 return sk->sk_send_head;
1259}
1260
David S. Millercd07a8e2008-09-23 00:50:13 -07001261static inline bool tcp_skb_is_last(const struct sock *sk,
1262 const struct sk_buff *skb)
1263{
1264 return skb_queue_is_last(&sk->sk_write_queue, skb);
1265}
1266
David S. Millerfe067e82007-03-07 12:12:44 -08001267static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
1268{
David S. Millercd07a8e2008-09-23 00:50:13 -07001269 if (tcp_skb_is_last(sk, skb))
David S. Millerfe067e82007-03-07 12:12:44 -08001270 sk->sk_send_head = NULL;
David S. Millercd07a8e2008-09-23 00:50:13 -07001271 else
1272 sk->sk_send_head = tcp_write_queue_next(sk, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001273}
1274
1275static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1276{
1277 if (sk->sk_send_head == skb_unlinked)
1278 sk->sk_send_head = NULL;
1279}
1280
1281static inline void tcp_init_send_head(struct sock *sk)
1282{
1283 sk->sk_send_head = NULL;
1284}
1285
1286static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1287{
1288 __skb_queue_tail(&sk->sk_write_queue, skb);
1289}
1290
1291static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1292{
1293 __tcp_add_write_queue_tail(sk, skb);
1294
1295 /* Queue it, remembering where we must start sending. */
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001296 if (sk->sk_send_head == NULL) {
David S. Millerfe067e82007-03-07 12:12:44 -08001297 sk->sk_send_head = skb;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001298
1299 if (tcp_sk(sk)->highest_sack == NULL)
1300 tcp_sk(sk)->highest_sack = skb;
1301 }
David S. Millerfe067e82007-03-07 12:12:44 -08001302}
1303
1304static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1305{
1306 __skb_queue_head(&sk->sk_write_queue, skb);
1307}
1308
1309/* Insert buff after skb on the write queue of sk. */
1310static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1311 struct sk_buff *buff,
1312 struct sock *sk)
1313{
Gerrit Renker7de6c032008-04-14 00:05:09 -07001314 __skb_queue_after(&sk->sk_write_queue, skb, buff);
David S. Millerfe067e82007-03-07 12:12:44 -08001315}
1316
David S. Miller43f59c82008-09-21 21:28:51 -07001317/* Insert new before skb on the write queue of sk. */
David S. Millerfe067e82007-03-07 12:12:44 -08001318static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1319 struct sk_buff *skb,
1320 struct sock *sk)
1321{
David S. Miller43f59c82008-09-21 21:28:51 -07001322 __skb_queue_before(&sk->sk_write_queue, skb, new);
Ilpo Järvinen6e421412007-11-19 23:24:09 -08001323
1324 if (sk->sk_send_head == skb)
1325 sk->sk_send_head = new;
David S. Millerfe067e82007-03-07 12:12:44 -08001326}
1327
1328static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1329{
1330 __skb_unlink(skb, &sk->sk_write_queue);
1331}
1332
David S. Millerfe067e82007-03-07 12:12:44 -08001333static inline int tcp_write_queue_empty(struct sock *sk)
1334{
1335 return skb_queue_empty(&sk->sk_write_queue);
1336}
1337
Krishna Kumar12d50c42009-12-08 22:26:13 +00001338static inline void tcp_push_pending_frames(struct sock *sk)
1339{
1340 if (tcp_send_head(sk)) {
1341 struct tcp_sock *tp = tcp_sk(sk);
1342
1343 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1344 }
1345}
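
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * typical use of the write queue abstraction; everything from
 * tcp_send_head() onwards is still unsent.
 */
#if 0	/* example only, never compiled */
static inline unsigned int example_count_sent_skbs(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned int sent = 0;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;	/* the rest has not been sent yet */
		sent++;
	}
	return sent;
}
#endif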
1346
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001347/* Start sequence of the highest skb with the SACKed bit set, valid only if
1348 * sacked_out > 0 or when the caller has itself ensured validity.
1349 */
1350static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1351{
1352 if (!tp->sacked_out)
1353 return tp->snd_una;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001354
1355 if (tp->highest_sack == NULL)
1356 return tp->snd_nxt;
1357
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001358 return TCP_SKB_CB(tp->highest_sack)->seq;
1359}
1360
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001361static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1362{
1363 tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1364 tcp_write_queue_next(sk, skb);
1365}
1366
1367static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1368{
1369 return tcp_sk(sk)->highest_sack;
1370}
1371
1372static inline void tcp_highest_sack_reset(struct sock *sk)
1373{
1374 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1375}
1376
1377/* Called when old skb is about to be deleted (to be combined with new skb) */
1378static inline void tcp_highest_sack_combine(struct sock *sk,
1379 struct sk_buff *old,
1380 struct sk_buff *new)
1381{
1382 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1383 tcp_sk(sk)->highest_sack = new;
1384}
1385
Andreas Petlund5aa4b322010-02-18 02:45:45 +00001386/* Determines whether this is a thin stream (which may suffer from
1387 * increased latency). Used to trigger latency-reducing mechanisms.
1388 */
1389static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
1390{
1391 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1392}
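
/*
 * With fewer than four packets in flight a loss can never produce the
 * three duplicate ACKs needed for fast retransmit, so a thin stream
 * otherwise has to wait for an RTO; that is what the latency-reducing
 * mechanisms mentioned above address.
 */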
1393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394/* /proc */
1395enum tcp_seq_states {
1396 TCP_SEQ_STATE_LISTENING,
1397 TCP_SEQ_STATE_OPENREQ,
1398 TCP_SEQ_STATE_ESTABLISHED,
1399 TCP_SEQ_STATE_TIME_WAIT,
1400};
1401
1402struct tcp_seq_afinfo {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 char *name;
1404 sa_family_t family;
Denis V. Lunev68fcadd2008-04-13 22:13:30 -07001405 struct file_operations seq_fops;
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001406 struct seq_operations seq_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407};
1408
1409struct tcp_iter_state {
Denis V. Luneva4146b12008-04-13 22:11:14 -07001410 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 sa_family_t family;
1412 enum tcp_seq_states state;
1413 struct sock *syn_wait_sk;
Tom Herberta8b690f2010-06-07 00:43:42 -07001414 int bucket, offset, sbucket, num, uid;
1415 loff_t last_pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416};
1417
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001418extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1419extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001421extern struct request_sock_ops tcp_request_sock_ops;
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001422extern struct request_sock_ops tcp6_request_sock_ops;
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001423
Brian Haley7d06b2e2008-06-14 17:04:49 -07001424extern void tcp_v4_destroy_sock(struct sock *sk);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001425
Herbert Xua430a432006-07-08 13:34:56 -07001426extern int tcp_v4_gso_send_check(struct sk_buff *skb);
Michał Mirosław04ed3e72011-01-24 15:32:47 -08001427extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
Herbert Xubf296b12008-12-15 23:43:36 -08001428extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
1429 struct sk_buff *skb);
1430extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
1431 struct sk_buff *skb);
1432extern int tcp_gro_complete(struct sk_buff *skb);
1433extern int tcp4_gro_complete(struct sk_buff *skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07001434
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001435#ifdef CONFIG_PROC_FS
Changli Gao53d31762010-07-10 20:41:06 +00001436extern int tcp4_proc_init(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001437extern void tcp4_proc_exit(void);
1438#endif
1439
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001440/* TCP af-specific functions */
1441struct tcp_sock_af_ops {
1442#ifdef CONFIG_TCP_MD5SIG
1443 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
1444 struct sock *addr_sk);
1445 int (*calc_md5_hash) (char *location,
1446 struct tcp_md5sig_key *md5,
1447 struct sock *sk,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001448 struct request_sock *req,
Adam Langley49a72df2008-07-19 00:01:42 -07001449 struct sk_buff *skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001450 int (*md5_add) (struct sock *sk,
1451 struct sock *addr_sk,
1452 u8 *newkey,
1453 u8 len);
1454 int (*md5_parse) (struct sock *sk,
1455 char __user *optval,
1456 int optlen);
1457#endif
1458};
1459
1460struct tcp_request_sock_ops {
1461#ifdef CONFIG_TCP_MD5SIG
1462 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
1463 struct request_sock *req);
John Dykstrae3afe7b2009-07-16 05:04:51 +00001464 int (*calc_md5_hash) (char *location,
1465 struct tcp_md5sig_key *md5,
1466 struct sock *sk,
1467 struct request_sock *req,
1468 struct sk_buff *skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001469#endif
1470};
1471
William Allen Simpsonda5c78c2009-12-02 18:12:09 +00001472/* Using SHA1 for now, define some constants.
1473 */
1474#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
1475#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
1476#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)
1477
1478extern int tcp_cookie_generator(u32 *bakery);
1479
William Allen Simpson435cf552009-12-02 18:17:05 +00001480/**
1481 * struct tcp_cookie_values - each socket needs extra space for the
1482 * cookies, together with (optional) space for any SYN data.
1483 *
1484 * A tcp_sock contains a pointer to the current value, and this is
1485 * cloned to the tcp_timewait_sock.
1486 *
1487 * @cookie_pair: variable data from the option exchange.
1488 *
1489 * @cookie_desired: user specified tcpct_cookie_desired. Zero
1490 * indicates default (sysctl_tcp_cookie_size).
1491 * After cookie sent, remembers size of cookie.
1492 * Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
1493 *
1494 * @s_data_desired: user specified tcpct_s_data_desired. When the
1495 * constant payload is specified (@s_data_constant),
1496 * holds its length instead.
1497 * Range 0 to TCP_MSS_DESIRED.
1498 *
1499 * @s_data_payload: constant data that is to be included in the
1500 * payload of SYN or SYNACK segments when the
1501 * cookie option is present.
1502 */
1503struct tcp_cookie_values {
1504 struct kref kref;
1505 u8 cookie_pair[TCP_COOKIE_PAIR_SIZE];
1506 u8 cookie_pair_size;
1507 u8 cookie_desired;
1508 u16 s_data_desired:11,
1509 s_data_constant:1,
1510 s_data_in:1,
1511 s_data_out:1,
1512 s_data_unused:2;
1513 u8 s_data_payload[0];
1514};
1515
1516static inline void tcp_cookie_values_release(struct kref *kref)
1517{
1518 kfree(container_of(kref, struct tcp_cookie_values, kref));
1519}
1520
1521/* The length of constant payload data. Note that s_data_desired is
1522 * overloaded, depending on s_data_constant: either the length of constant
1523 * data (returned here) or the limit on variable data.
1524 */
1525static inline int tcp_s_data_size(const struct tcp_sock *tp)
1526{
1527 return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
1528 ? tp->cookie_values->s_data_desired
1529 : 0;
1530}
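
/*
 * For example: with s_data_constant set and s_data_desired == 8,
 * tcp_s_data_size() returns 8 and the first 8 bytes of s_data_payload[]
 * are carried in the SYN or SYNACK payload; with s_data_constant clear
 * it returns 0 and s_data_desired only caps the amount of variable data.
 */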
1531
1532/**
1533 * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
1534 *
1535 * As tcp_request_sock has already been extended in other places, the
1536 * only remaining method is to pass stack values along as function
1537 * parameters. These parameters are not needed after sending SYNACK.
1538 *
1539 * @cookie_bakery: cryptographic secret and message workspace.
1540 *
1541 * @cookie_plus: bytes in authenticator/cookie option, copied from
1542 * struct tcp_options_received (above).
1543 */
1544struct tcp_extend_values {
1545 struct request_values rv;
1546 u32 cookie_bakery[COOKIE_WORKSPACE_WORDS];
1547 u8 cookie_plus:6,
1548 cookie_out_never:1,
1549 cookie_in_always:1;
1550};
1551
1552static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
1553{
1554 return (struct tcp_extend_values *)rvp;
1555}
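
/*
 * The cast above is valid because rv is the first member of
 * struct tcp_extend_values, so a pointer to the containing structure
 * and a pointer to its rv member share the same address.
 */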
1556
Denis V. Lunev9b0f9762008-02-29 11:13:15 -08001557extern void tcp_v4_init(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001558extern void tcp_init(void);
1559
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560#endif /* _TCP_H */