/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U
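
/*
 * Worked reading of the arithmetic above (illustrative, not from the
 * original comment): 60 bytes of maximally-optioned IP header plus 60
 * bytes of maximally-optioned TCP header plus 8 bytes of payload, minus
 * the two 20-byte base headers that an MSS never counts:
 * (60 + 60 + 8) - (20 + 20) = 128 - 40 = 88 bytes.
 */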

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)
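
/*
 * Worked example of the keepalive defaults above (illustrative): an idle
 * connection is first probed after TCP_KEEPALIVE_TIME = 2 hours; if no
 * probe is answered, up to TCP_KEEPALIVE_PROBES = 9 probes are sent
 * TCP_KEEPALIVE_INTVL = 75 s apart, so a dead peer is declared roughly
 * 2 h + 9 * 75 s, i.e. about 2 h 11 min after the last activity.  The
 * per-socket equivalents can be set with the TCP_KEEPIDLE, TCP_KEEPINTVL
 * and TCP_KEEPCNT socket options, bounded by the MAX_TCP_KEEP* limits
 * that follow.
 */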

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * the minimal timewait lifetime.
					 */
/*
 * TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE		253	/* Cookie extension (experimental) */

/*
 * TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_COOKIE_BASE	2	/* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR	3	/* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
#define TCPOLEN_COOKIE_MAX	(TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
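
/*
 * Illustrative wire layout (not from the original header): the "aligned"
 * lengths include the NOP padding Linux emits so that each option starts
 * on a 32-bit boundary.  For timestamps, TCPOLEN_TSTAMP_ALIGNED = 12
 * corresponds to:
 *
 *	NOP(1) NOP(1) kind=8 len=10 TSval(4) TSecr(4)
 *
 * i.e. two padding bytes plus the 10-byte option of TCPOLEN_TIMESTAMP.
 * A SYN advertising MSS, window scale, SACK-permitted and timestamps
 * therefore needs 4 + 4 + 4 + 12 = 24 bytes when aligned, well inside the
 * 40 bytes allowed by MAX_TCP_OPTION_SPACE.
 */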

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
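
/*
 * Illustrative note (not from the original header): because the comparison
 * is done on the signed 32-bit difference, it stays correct across the
 * sequence-number wrap, e.g.
 *
 *	before(0xfffffff0, 0x00000010) is true, since
 *	(__s32)(0xfffffff0 - 0x00000010) == -0x20 < 0,
 *
 * even though 0xfffffff0 is the larger unsigned value.  between() relies on
 * the same trick: both subtractions are taken modulo 2^32, so
 * "seq2 <= seq1 <= seq3" is evaluated relative to seq2 rather than in
 * absolute terms.
 */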

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}

	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
		return true;
	return false;
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
			     struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx, u8 **hvpp,
			      int estab);
extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);
extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
					struct request_sock *req,
					struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
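
/*
 * Worked example (illustrative, not from the original comment): with
 * tp->max_window = 16000 the cutoff is 8000, so a 9000-byte TSO chunk is
 * bounded to max_t(int, 8000, 68 - tcp_header_len) = 8000 while a
 * 1400-byte MSS passes through unchanged.  With a tiny peer window such
 * as max_window = 300 (< 512), the cutoff is the whole window, so a
 * 1400-byte size is bounded to 300 rather than to half the window.
 */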

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

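/*
 * Illustrative usage (not from the original header): tcp_flag_byte()
 * aliases byte 13 of the TCP header, the byte holding the flag bits
 * defined above, so whole-flag-byte tests can be written without touching
 * the bitfields of struct tcphdr:
 *
 *	if (tcp_flag_byte(th) == (TCPHDR_SYN | TCPHDR_ACK))
 *		...		segment carries exactly SYN and ACK
 *	if (tcp_flag_byte(th) & TCPHDR_RST)
 *		...		RST is set, other flags ignored
 */
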
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/
	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
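
/*
 * Illustrative usage (not from the original header): the control block
 * lives in the skb->cb[] scratch area, so per-segment state travels with
 * the skb itself.  Code on the transmit path typically does something like
 * the following (payload_len is an example name, not a real field):
 *
 *	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 *
 *	tcb->seq     = tp->write_seq;
 *	tcb->end_seq = tcb->seq + payload_len;
 *	tcb->flags   = TCPHDR_ACK;
 *	tcb->when    = tcp_time_stamp;		for later RTT sampling
 */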

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};
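
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * a minimal congestion control module only has to fill in the required
 * hooks and register itself; everything referenced below is declared in
 * this header.  The sketch simply reuses the Reno helpers; a real module
 * would supply its own ssthresh/cong_avoid logic and keep private
 * per-connection state in inet_csk_ca(sk).
 */
#if 0	/* example only, never built */
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd after loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* slow start + AIMD growth */
	.min_cwnd	= tcp_reno_min_cwnd,
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
#endif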

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
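
/*
 * Worked example (illustrative, not from the original comment): with
 * packets_out = 10, sacked_out = 2, lost_out = 1 and retrans_out = 1,
 *
 *	tcp_left_out()          = 2 + 1 = 3
 *	tcp_packets_in_flight() = 10 - 3 + 1 = 8
 *
 * i.e. of the 10 segments sent, two are already held by the receiver
 * (SACKed), one is presumed lost, and one retransmission is in the pipe
 * again, leaving 8 segments that the congestion window must account for.
 */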

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

/*
 * Convert RFC 3390 larger initial window into an equivalent number of packets.
 * This is based on the numbers specified in RFC 5681, 3.1.
 */
static inline u32 rfc3390_bytes_to_packets(const u32 smss)
{
	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}
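
/*
 * Worked mapping of the expression above (illustrative):
 *
 *	SMSS <= 1095 bytes	    -> initial window of 4 segments
 *	1095 < SMSS <= 2190 bytes   -> 3 segments
 *	SMSS >  2190 bytes	    -> 2 segments
 *
 * so for a common Ethernet SMSS of 1460 bytes the RFC 3390 / RFC 5681
 * initial window works out to 3 segments (~4380 bytes).
 */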

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting.  Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere.  Latency?  Burstiness?  Well, at least now we will
 * see why it failed.  8)8)				--ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale,
				      __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
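
/*
 * Worked example (illustrative, not from the original header): with the
 * default sysctl_tcp_adv_win_scale = 2, tcp_win_from_space() reserves a
 * quarter of the buffer for bookkeeping overhead:
 *
 *	space - (space >> 2) = 3/4 of the buffer,
 *
 * so a 256 KB sk_rcvbuf yields at most a 192 KB advertised window.  A
 * negative scale inverts the split, e.g. -2 would advertise only
 * space >> 2 = 1/4 of the buffer.
 */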
981
Stephen Hemminger40efc6f2006-01-03 16:03:49 -0800982static inline void tcp_openreq_init(struct request_sock *req,
983 struct tcp_options_received *rx_opt,
984 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -0700986 struct inet_request_sock *ireq = inet_rsk(req);
987
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
Florian Westphal4dfc2812008-04-10 03:12:40 -0700989 req->cookie_ts = 0;
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -0700990 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 req->mss = rx_opt->mss_clamp;
992 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -0700993 ireq->tstamp_ok = rx_opt->tstamp_ok;
994 ireq->sack_ok = rx_opt->sack_ok;
995 ireq->snd_wscale = rx_opt->snd_wscale;
996 ireq->wscale_ok = rx_opt->wscale_ok;
997 ireq->acked = 0;
998 ireq->ecn_ok = 0;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -0700999 ireq->rmt_port = tcp_hdr(skb)->source;
KOVACS Krisztiana3116ac2008-10-01 07:46:49 -07001000 ireq->loc_port = tcp_hdr(skb)->dest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001}
1002
Pavel Emelyanov5c52ba12008-07-16 20:28:10 -07001003extern void tcp_enter_memory_pressure(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1006{
1007 return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
1008}
1009
1010static inline int keepalive_time_when(const struct tcp_sock *tp)
1011{
1012 return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1013}
1014
Eric Dumazetdf19a622009-08-28 23:48:54 -07001015static inline int keepalive_probes(const struct tcp_sock *tp)
1016{
1017 return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1018}
1019
Flavio Leitner6c37e5d2010-04-26 18:33:27 +00001020static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1021{
1022 const struct inet_connection_sock *icsk = &tp->inet_conn;
1023
1024 return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1025 tcp_time_stamp - tp->rcv_tstamp);
1026}
1027
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001028static inline int tcp_fin_time(const struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029{
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001030 int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
1031 const int rto = inet_csk(sk)->icsk_rto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001033 if (fin_timeout < (rto << 2) - (rto >> 1))
1034 fin_timeout = (rto << 2) - (rto >> 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035
1036 return fin_timeout;
1037}
1038
Ilpo Järvinenc887e6d2009-03-14 14:23:03 +00001039static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
1040 int paws_win)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041{
Ilpo Järvinenc887e6d2009-03-14 14:23:03 +00001042 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1043 return 1;
1044 if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1045 return 1;
1046
1047 return 0;
1048}
1049
1050static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
1051 int rst)
1052{
1053 if (tcp_paws_check(rx_opt, 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 return 0;
1055
1056 /* RST segments are not recommended to carry timestamp,
1057 and, if they do, it is recommended to ignore PAWS because
1058 "their cleanup function should take precedence over timestamps."
1059 Certainly, it is mistake. It is necessary to understand the reasons
1060 of this constraint to relax it: if peer reboots, clock may go
1061 out-of-sync and half-open connections will not be reset.
1062 Actually, the problem would be not existing if all
1063 the implementations followed draft about maintaining clock
1064 via reboots. Linux-2.2 DOES NOT!
1065
1066 However, we can relax time bounds for RST segments to MSL.
1067 */
James Morris9d729f72007-03-04 16:12:44 -08001068 if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 return 0;
1070 return 1;
1071}
1072
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073#define TCP_CHECK_TIMER(sk) do { } while (0)
1074
Pavel Emelyanova9c19322008-07-16 20:21:42 -07001075static inline void tcp_mib_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076{
1077 /* See RFC 2012 */
Pavel Emelyanovcf1100a2008-07-16 20:27:38 -07001078 TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
1079 TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1080 TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1081 TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082}
1083
Ilpo Järvinen5af4ec22007-09-20 11:30:48 -07001084/* from STCP */
Ilpo Järvinenef9da472008-09-20 21:25:15 -07001085static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
David S. Miller0800f172007-09-20 11:40:37 -07001086{
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08001087 tp->lost_skb_hint = NULL;
1088 tp->scoreboard_skb_hint = NULL;
Ilpo Järvinenef9da472008-09-20 21:25:15 -07001089}
1090
1091static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1092{
1093 tcp_clear_retrans_hints_partial(tp);
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08001094 tp->retransmit_skb_hint = NULL;
Ilpo Järvinenb7689202007-09-20 11:37:19 -07001095}
1096
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001097/* MD5 Signature */
1098struct crypto_hash;
1099
1100/* - key database */
1101struct tcp_md5sig_key {
1102 u8 *key;
1103 u8 keylen;
1104};
1105
1106struct tcp4_md5sig_key {
David S. Millerf8ab18d2007-09-28 15:18:35 -07001107 struct tcp_md5sig_key base;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001108 __be32 addr;
1109};
1110
1111struct tcp6_md5sig_key {
David S. Millerf8ab18d2007-09-28 15:18:35 -07001112 struct tcp_md5sig_key base;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001113#if 0
1114 u32 scope_id; /* XXX */
1115#endif
1116 struct in6_addr addr;
1117};
1118
1119/* - sock block */
1120struct tcp_md5sig_info {
1121 struct tcp4_md5sig_key *keys4;
1122#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1123 struct tcp6_md5sig_key *keys6;
1124 u32 entries6;
1125 u32 alloced6;
1126#endif
1127 u32 entries4;
1128 u32 alloced4;
1129};
1130
1131/* - pseudo header */
1132struct tcp4_pseudohdr {
1133 __be32 saddr;
1134 __be32 daddr;
1135 __u8 pad;
1136 __u8 protocol;
1137 __be16 len;
1138};
1139
1140struct tcp6_pseudohdr {
1141 struct in6_addr saddr;
1142 struct in6_addr daddr;
1143 __be32 len;
1144 __be32 protocol; /* including padding */
1145};
1146
1147union tcp_md5sum_block {
1148 struct tcp4_pseudohdr ip4;
1149#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1150 struct tcp6_pseudohdr ip6;
1151#endif
1152};
1153
1154/* - pool: digest algorithm, hash description and scratch buffer */
1155struct tcp_md5sig_pool {
1156 struct hash_desc md5_desc;
1157 union tcp_md5sum_block md5_blk;
1158};
1159
1160#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */
1161
1162/* - functions */
Changli Gao53d31762010-07-10 20:41:06 +00001163extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1164 struct sock *sk, struct request_sock *req,
1165 struct sk_buff *skb);
1166extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
1167 struct sock *addr_sk);
1168extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
1169 u8 newkeylen);
1170extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001171
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +09001172#ifdef CONFIG_TCP_MD5SIG
1173#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_keylen ? \
1174 &(struct tcp_md5sig_key) { \
1175 .key = (twsk)->tw_md5_key, \
1176 .keylen = (twsk)->tw_md5_keylen, \
1177 } : NULL)
1178#else
1179#define tcp_twsk_md5_key(twsk) NULL
1180#endif
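
/* Editor's note: tcp_twsk_md5_key() builds a C99 compound literal around the
 * key bytes stored in the timewait sock, so the returned pointer is only
 * valid within the enclosing block and must not be cached.
 */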
1181
Tejun Heo7d720c32010-02-16 15:20:26 +00001182extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
Changli Gao53d31762010-07-10 20:41:06 +00001183extern void tcp_free_md5sig_pool(void);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001184
Eric Dumazet35790c02010-05-16 00:34:04 -07001185extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
Changli Gao53d31762010-07-10 20:41:06 +00001186extern void tcp_put_md5sig_pool(void);
Eric Dumazet35790c02010-05-16 00:34:04 -07001187
Adam Langley49a72df2008-07-19 00:01:42 -07001188extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
1189extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
1190 unsigned header_len);
1191extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1192 struct tcp_md5sig_key *key);
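
/* Editor's sketch of the expected calling pattern for the md5sig pool helpers
 * above (modelled on tcp_v4_md5_hash_skb(); simplified, and the pseudo-header
 * step is omitted).  Assumes the per-cpu pools were set up via
 * tcp_alloc_md5sig_pool().  Not part of the original header.
 */
static inline int tcp_md5_hash_sketch(u8 *md5_hash, struct tcp_md5sig_key *key,
				      struct sk_buff *skb, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (!hp)
		return 1;
	if (crypto_hash_init(&hp->md5_desc) ||
	    tcp_md5_hash_header(hp, th) ||
	    tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||
	    tcp_md5_hash_key(hp, key) ||
	    crypto_hash_final(&hp->md5_desc, md5_hash))
		goto out_err;
	tcp_put_md5sig_pool();
	return 0;
out_err:
	tcp_put_md5sig_pool();
	return 1;
}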
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001193
David S. Millerfe067e82007-03-07 12:12:44 -08001194/* write queue abstraction */
1195static inline void tcp_write_queue_purge(struct sock *sk)
1196{
1197 struct sk_buff *skb;
1198
1199 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001200 sk_wmem_free_skb(sk, skb);
1201 sk_mem_reclaim(sk);
Ilpo Järvinen8818a9d2009-12-02 22:24:02 -08001202 tcp_clear_all_retrans_hints(tcp_sk(sk));
David S. Millerfe067e82007-03-07 12:12:44 -08001203}
1204
1205static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
1206{
David S. Millercd07a8e2008-09-23 00:50:13 -07001207 return skb_peek(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001208}
1209
1210static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
1211{
David S. Millercd07a8e2008-09-23 00:50:13 -07001212 return skb_peek_tail(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001213}
1214
1215static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
1216{
David S. Millercd07a8e2008-09-23 00:50:13 -07001217 return skb_queue_next(&sk->sk_write_queue, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001218}
1219
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001220static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
1221{
1222 return skb_queue_prev(&sk->sk_write_queue, skb);
1223}
1224
David S. Millerfe067e82007-03-07 12:12:44 -08001225#define tcp_for_write_queue(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001226 skb_queue_walk(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001227
1228#define tcp_for_write_queue_from(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001229 skb_queue_walk_from(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001230
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001231#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001232 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001233
David S. Millerfe067e82007-03-07 12:12:44 -08001234static inline struct sk_buff *tcp_send_head(struct sock *sk)
1235{
1236 return sk->sk_send_head;
1237}
1238
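/* Editor's sketch (illustrative only): the canonical way to combine the
 * iterator and send-head helpers above -- walk the skbs that have already
 * been transmitted and stop when the walk reaches the first unsent skb,
 * which is what tcp_send_head() points at.
 */
static inline unsigned int tcp_count_sent_sketch(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned int sent = 0;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		sent++;
	}
	return sent;
}
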
David S. Millercd07a8e2008-09-23 00:50:13 -07001239static inline bool tcp_skb_is_last(const struct sock *sk,
1240 const struct sk_buff *skb)
1241{
1242 return skb_queue_is_last(&sk->sk_write_queue, skb);
1243}
1244
David S. Millerfe067e82007-03-07 12:12:44 -08001245static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
1246{
David S. Millercd07a8e2008-09-23 00:50:13 -07001247 if (tcp_skb_is_last(sk, skb))
David S. Millerfe067e82007-03-07 12:12:44 -08001248 sk->sk_send_head = NULL;
David S. Millercd07a8e2008-09-23 00:50:13 -07001249 else
1250 sk->sk_send_head = tcp_write_queue_next(sk, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001251}
1252
1253static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1254{
1255 if (sk->sk_send_head == skb_unlinked)
1256 sk->sk_send_head = NULL;
1257}
1258
1259static inline void tcp_init_send_head(struct sock *sk)
1260{
1261 sk->sk_send_head = NULL;
1262}
1263
1264static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1265{
1266 __skb_queue_tail(&sk->sk_write_queue, skb);
1267}
1268
1269static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1270{
1271 __tcp_add_write_queue_tail(sk, skb);
1272
1273 /* Queue it, remembering where we must start sending. */
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001274 if (sk->sk_send_head == NULL) {
David S. Millerfe067e82007-03-07 12:12:44 -08001275 sk->sk_send_head = skb;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001276
1277 if (tcp_sk(sk)->highest_sack == NULL)
1278 tcp_sk(sk)->highest_sack = skb;
1279 }
David S. Millerfe067e82007-03-07 12:12:44 -08001280}
1281
1282static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1283{
1284 __skb_queue_head(&sk->sk_write_queue, skb);
1285}
1286
1287/* Insert buff after skb on the write queue of sk. */
1288static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1289 struct sk_buff *buff,
1290 struct sock *sk)
1291{
Gerrit Renker7de6c032008-04-14 00:05:09 -07001292 __skb_queue_after(&sk->sk_write_queue, skb, buff);
David S. Millerfe067e82007-03-07 12:12:44 -08001293}
1294
David S. Miller43f59c82008-09-21 21:28:51 -07001295/* Insert new before skb on the write queue of sk. */
David S. Millerfe067e82007-03-07 12:12:44 -08001296static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1297 struct sk_buff *skb,
1298 struct sock *sk)
1299{
David S. Miller43f59c82008-09-21 21:28:51 -07001300 __skb_queue_before(&sk->sk_write_queue, skb, new);
Ilpo Järvinen6e421412007-11-19 23:24:09 -08001301
1302 if (sk->sk_send_head == skb)
1303 sk->sk_send_head = new;
David S. Millerfe067e82007-03-07 12:12:44 -08001304}
1305
1306static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1307{
1308 __skb_unlink(skb, &sk->sk_write_queue);
1309}
1310
David S. Millerfe067e82007-03-07 12:12:44 -08001311static inline int tcp_write_queue_empty(struct sock *sk)
1312{
1313 return skb_queue_empty(&sk->sk_write_queue);
1314}
1315
Krishna Kumar12d50c42009-12-08 22:26:13 +00001316static inline void tcp_push_pending_frames(struct sock *sk)
1317{
1318 if (tcp_send_head(sk)) {
1319 struct tcp_sock *tp = tcp_sk(sk);
1320
1321 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1322 }
1323}
1324
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001325/* Start sequence of the highest skb with the SACKed bit set, valid only if
1326 * sacked_out > 0 or when the caller has otherwise ensured validity.
1327 */
1328static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1329{
1330 if (!tp->sacked_out)
1331 return tp->snd_una;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001332
1333 if (tp->highest_sack == NULL)
1334 return tp->snd_nxt;
1335
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001336 return TCP_SKB_CB(tp->highest_sack)->seq;
1337}
1338
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001339static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1340{
1341 tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1342 tcp_write_queue_next(sk, skb);
1343}
1344
1345static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1346{
1347 return tcp_sk(sk)->highest_sack;
1348}
1349
1350static inline void tcp_highest_sack_reset(struct sock *sk)
1351{
1352 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1353}
1354
1355/* Called when old skb is about to be deleted (to be combined with new skb) */
1356static inline void tcp_highest_sack_combine(struct sock *sk,
1357 struct sk_buff *old,
1358 struct sk_buff *new)
1359{
1360 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1361 tcp_sk(sk)->highest_sack = new;
1362}
1363
Andreas Petlund5aa4b322010-02-18 02:45:45 +00001364/* Determines whether this is a thin stream (which may suffer from
1365 * increased latency). Used to trigger latency-reducing mechanisms.
1366 */
1367static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
1368{
1369 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1370}
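
/* Editor's note: the threshold of 4 follows from TCP_FASTRETRANS_THRESH --
 * with fewer than four packets in flight a loss can never be signalled by
 * three duplicate ACKs, so such "thin" flows would otherwise have to wait
 * for an RTO; the thin-stream options trade a little extra aggressiveness
 * for lower retransmission latency on them.
 */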
1371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372/* /proc */
1373enum tcp_seq_states {
1374 TCP_SEQ_STATE_LISTENING,
1375 TCP_SEQ_STATE_OPENREQ,
1376 TCP_SEQ_STATE_ESTABLISHED,
1377 TCP_SEQ_STATE_TIME_WAIT,
1378};
1379
1380struct tcp_seq_afinfo {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 char *name;
1382 sa_family_t family;
Denis V. Lunev68fcadd2008-04-13 22:13:30 -07001383 struct file_operations seq_fops;
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001384 struct seq_operations seq_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385};
1386
1387struct tcp_iter_state {
Denis V. Luneva4146b12008-04-13 22:11:14 -07001388 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 sa_family_t family;
1390 enum tcp_seq_states state;
1391 struct sock *syn_wait_sk;
Tom Herberta8b690f2010-06-07 00:43:42 -07001392 int bucket, offset, sbucket, num, uid;
1393 loff_t last_pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394};
1395
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001396extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1397extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
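/* Editor's sketch (hypothetical names): how a family registers its /proc seq
 * file through the structures above.  The in-tree users are
 * tcp4_proc_init_net()/tcp6_proc_init_net(); here .show is left NULL purely
 * for illustration, a real user points it at its show() callback.
 */
static struct tcp_seq_afinfo tcp_example_seq_afinfo = {
	.name		= "tcp_example",
	.family		= AF_INET,
	.seq_ops	= {
		.show	= NULL,
	},
};

static inline int tcp_example_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp_example_seq_afinfo);
}
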
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001399extern struct request_sock_ops tcp_request_sock_ops;
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001400extern struct request_sock_ops tcp6_request_sock_ops;
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001401
Brian Haley7d06b2e2008-06-14 17:04:49 -07001402extern void tcp_v4_destroy_sock(struct sock *sk);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001403
Herbert Xua430a432006-07-08 13:34:56 -07001404extern int tcp_v4_gso_send_check(struct sk_buff *skb);
Herbert Xu576a30e2006-06-27 13:22:38 -07001405extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
Herbert Xubf296b12008-12-15 23:43:36 -08001406extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
1407 struct sk_buff *skb);
1408extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
1409 struct sk_buff *skb);
1410extern int tcp_gro_complete(struct sk_buff *skb);
1411extern int tcp4_gro_complete(struct sk_buff *skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07001412
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001413#ifdef CONFIG_PROC_FS
Changli Gao53d31762010-07-10 20:41:06 +00001414extern int tcp4_proc_init(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001415extern void tcp4_proc_exit(void);
1416#endif
1417
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001418/* TCP af-specific functions */
1419struct tcp_sock_af_ops {
1420#ifdef CONFIG_TCP_MD5SIG
1421 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
1422 struct sock *addr_sk);
1423 int (*calc_md5_hash) (char *location,
1424 struct tcp_md5sig_key *md5,
1425 struct sock *sk,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001426 struct request_sock *req,
Adam Langley49a72df2008-07-19 00:01:42 -07001427 struct sk_buff *skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001428 int (*md5_add) (struct sock *sk,
1429 struct sock *addr_sk,
1430 u8 *newkey,
1431 u8 len);
1432 int (*md5_parse) (struct sock *sk,
1433 char __user *optval,
1434 int optlen);
1435#endif
1436};
1437
1438struct tcp_request_sock_ops {
1439#ifdef CONFIG_TCP_MD5SIG
1440 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
1441 struct request_sock *req);
John Dykstrae3afe7b2009-07-16 05:04:51 +00001442 int (*calc_md5_hash) (char *location,
1443 struct tcp_md5sig_key *md5,
1444 struct sock *sk,
1445 struct request_sock *req,
1446 struct sk_buff *skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001447#endif
1448};
1449
William Allen Simpsonda5c78c2009-12-02 18:12:09 +00001450/* Using SHA1 for now; define the related constants.
1451 */
1452#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
1453#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
1454#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)
1455
1456extern int tcp_cookie_generator(u32 *bakery);
1457
William Allen Simpson435cf552009-12-02 18:17:05 +00001458/**
1459 * struct tcp_cookie_values - each socket needs extra space for the
1460 * cookies, together with (optional) space for any SYN data.
1461 *
1462 * A tcp_sock contains a pointer to the current value, and this is
1463 * cloned to the tcp_timewait_sock.
1464 *
1465 * @cookie_pair: variable data from the option exchange.
1466 *
1467 * @cookie_desired: user specified tcpct_cookie_desired. Zero
1468 * indicates default (sysctl_tcp_cookie_size).
1469 * After cookie sent, remembers size of cookie.
1470 * Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
1471 *
1472 * @s_data_desired: user specified tcpct_s_data_desired. When the
1473 * constant payload is specified (@s_data_constant),
1474 * holds its length instead.
1475 * Range 0 to TCP_MSS_DESIRED.
1476 *
1477 * @s_data_payload: constant data that is to be included in the
1478 * payload of SYN or SYNACK segments when the
1479 * cookie option is present.
1480 */
1481struct tcp_cookie_values {
1482 struct kref kref;
1483 u8 cookie_pair[TCP_COOKIE_PAIR_SIZE];
1484 u8 cookie_pair_size;
1485 u8 cookie_desired;
1486 u16 s_data_desired:11,
1487 s_data_constant:1,
1488 s_data_in:1,
1489 s_data_out:1,
1490 s_data_unused:2;
1491 u8 s_data_payload[0];
1492};
1493
1494static inline void tcp_cookie_values_release(struct kref *kref)
1495{
1496 kfree(container_of(kref, struct tcp_cookie_values, kref));
1497}
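
/* Editor's sketch (not the in-tree path): allocating a cookie_values block
 * that carries constant SYN payload, matching the kernel-doc above.  The real
 * allocation happens in do_tcp_setsockopt() for TCP_COOKIE_TRANSACTIONS;
 * kzalloc/kref_init/memcpy are assumed available via the usual includes, and
 * len is expected to stay within TCP_MSS_DESIRED.
 */
static inline struct tcp_cookie_values *tcp_cookie_values_alloc_sketch(const u8 *data,
								       u16 len)
{
	struct tcp_cookie_values *cvp = kzalloc(sizeof(*cvp) + len, GFP_KERNEL);

	if (!cvp)
		return NULL;
	kref_init(&cvp->kref);		/* released via tcp_cookie_values_release() */
	cvp->s_data_desired = len;	/* doubles as the constant-payload length */
	cvp->s_data_constant = 1;
	memcpy(cvp->s_data_payload, data, len);
	return cvp;
}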
1498
1499/* The length of constant payload data. Note that s_data_desired is
1500 * overloaded, depending on s_data_constant: either the length of constant
1501 * data (returned here) or the limit on variable data.
1502 */
1503static inline int tcp_s_data_size(const struct tcp_sock *tp)
1504{
1505 return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
1506 ? tp->cookie_values->s_data_desired
1507 : 0;
1508}
1509
1510/**
1511 * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
1512 *
1513 * As tcp_request_sock has already been extended in other places, the
1514 * only remaining method is to pass stack values along as function
1515 * parameters. These parameters are not needed after sending SYNACK.
1516 *
1517 * @cookie_bakery: cryptographic secret and message workspace.
1518 *
1519 * @cookie_plus: bytes in authenticator/cookie option, copied from
1520 * struct tcp_options_received (above).
1521 */
1522struct tcp_extend_values {
1523 struct request_values rv;
1524 u32 cookie_bakery[COOKIE_WORKSPACE_WORDS];
1525 u8 cookie_plus:6,
1526 cookie_out_never:1,
1527 cookie_in_always:1;
1528};
1529
1530static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
1531{
1532 return (struct tcp_extend_values *)rvp;
1533}
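
/* Editor's sketch: the intended round trip for the workspace above.  A caller
 * keeps a struct tcp_extend_values on its stack, passes &tev.rv through the
 * request_values argument, and the callee recovers the full structure with
 * tcp_xv().  The cast in tcp_xv() relies on 'rv' being the first member.
 */
static inline u8 tcp_cookie_plus_sketch(struct request_values *rvp)
{
	struct tcp_extend_values *tev = tcp_xv(rvp);

	return tev ? tev->cookie_plus : 0;
}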
1534
Denis V. Lunev9b0f9762008-02-29 11:13:15 -08001535extern void tcp_v4_init(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001536extern void tcp_init(void);
1537
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538#endif /* _TCP_H */