/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ)	/* how long to wait to destroy TIME-WAIT
					 * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
					/* BSD style FIN_WAIT2 deadlock breaker.
					 * It used to be 3min, new value is 60sec,
					 * to combine FIN-WAIT-2 timeout with
					 * TIME-WAIT timer.
					 */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U))	/* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 * TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */

/*
 * TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
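
/*
 * Editorial note (worked example, not from the original source): the
 * "aligned" sizes come from padding each option out to a 32-bit
 * boundary with TCPOPT_NOP bytes.  A timestamp option, for instance,
 * goes on the wire as two one-byte NOPs followed by the 10-byte option
 * itself:
 *
 *	1 (NOP) + 1 (NOP) + TCPOLEN_TIMESTAMP (10) = TCPOLEN_TSTAMP_ALIGNED (12)
 *
 * which keeps the TCP header length a multiple of four bytes, as the
 * data-offset field requires.
 */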

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)
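
/*
 * Worked example (editorial note, not from the original source): the
 * signed subtraction keeps the comparison valid across sequence-number
 * wraparound.  With seq1 = 0xfffffff0 and seq2 = 0x00000010,
 * seq1 - seq2 = 0xffffffe0, which is negative as an __s32, so
 * before(seq1, seq2) correctly reports that seq1 precedes seq2 even
 * though seq1 > seq2 as unsigned values.  The comparison is meaningful
 * as long as the two numbers are within 2^31 of each other.
 */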

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk,
		     int cmd,
		     unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk,
				 struct sk_buff *skb,
				 struct tcphdr *th,
				 unsigned len);

extern int tcp_rcv_established(struct sock *sk,
			       struct sk_buff *skb,
			       struct tcphdr *th,
			       unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern void tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int tcp_twsk_unique(struct sock *sk,
			   struct sock *sktw, void *twp);

extern void tcp_twsk_destructor(struct sock *sk);

extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status
{
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct request_sock **prev);
extern int tcp_child_process(struct sock *parent,
			     struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab);

extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct request_sock *req,
					     struct sk_buff *skb);

extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk,
					 struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				       struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code. We also store the host-order sequence numbers in
 * here too. This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 * real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;

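/*
 * Editorial sketch (not part of the original header): a new congestion
 * control algorithm fills in the required hooks of tcp_congestion_ops
 * and registers itself, typically from module init.  The "tcp_example"
 * names below are hypothetical; the reno helpers are the real fallback
 * implementations declared above.
 */
#if 0	/* illustrative only; belongs in its own module, not in tcp.h */
static struct tcp_congestion_ops tcp_example = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.min_cwnd	= tcp_reno_min_cwnd,	/* optional */
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
#endif
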
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

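/*
 * Worked example (editorial note, not from the original source): with
 * packets_out = 10, sacked_out = 3, lost_out = 2 and retrans_out = 1,
 * tcp_left_out() is 5 and tcp_packets_in_flight() = 10 - 5 + 1 = 6
 * segments still presumed to be in the network.
 */
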
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8) --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_poll(sk->sk_sleep,
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
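
/*
 * Worked example (editorial note, not from the original source): with
 * sysctl_tcp_adv_win_scale = 2 (the usual default in this era),
 * tcp_win_from_space(65536) = 65536 - (65536 >> 2) = 49152; a quarter
 * of the receive buffer is reserved for overhead rather than window.
 * A non-positive scale inverts the meaning and yields space >> -scale.
 */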

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}

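/*
 * Worked example (editorial note, not from the original source): a
 * segment passes PAWS when its timestamp is not older than ts_recent
 * by more than paws_win.  With ts_recent = 1000 and rcv_tsval = 999,
 * the signed difference is 1, so paws_win = TCP_PAWS_WINDOW (1) still
 * accepts it; rcv_tsval = 998 would be rejected unless ts_recent is
 * older than TCP_PAWS_24DAYS and therefore considered stale anyway.
 */
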
static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake.  It is necessary to understand the
	   reasons for this constraint before relaxing it: if the peer
	   reboots, its clock may go out-of-sync and half-open connections
	   will never be reset.
	   Actually, the problem would not exist if all implementations
	   followed the draft about maintaining clocks via reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8	*key;
	u8	keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash,
			       struct tcp_md5sig_key *key,
			       struct sock *sk,
			       struct request_sock *req,
			       struct sk_buff *skb);

extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

extern int tcp_v4_md5_do_add(struct sock *sk,
			     __be32 addr,
			     u8 *newkey,
			     u8 newkeylen);

extern int tcp_v4_md5_do_del(struct sock *sk,
			     __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}

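/*
 * Usage sketch (editorial example, not from the original source): the
 * get/put pair above pins the caller to one CPU via get_cpu(), so the
 * pool must be released on the same CPU and the section must not sleep.
 * The helper below is hypothetical and only shows how the hashing
 * primitives are meant to be combined.
 */
#if 0	/* illustrative only */
static int tcp_md5_hash_example(struct tcp_md5sig_key *key,
				struct sk_buff *skb, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
	int err;

	if (!hp)
		return -ENOMEM;	/* pool unavailable; put_cpu() already done */

	err = tcp_md5_hash_header(hp, th);
	if (!err)
		err = tcp_md5_hash_skb_data(hp, skb, th->doff << 2);
	if (!err)
		err = tcp_md5_hash_key(hp, key);

	tcp_put_md5sig_pool();	/* re-enables preemption */
	return err;
}
#endif
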
David S. Millerfe067e82007-03-07 12:12:44 -08001223/* write queue abstraction */
1224static inline void tcp_write_queue_purge(struct sock *sk)
1225{
1226 struct sk_buff *skb;
1227
1228 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001229 sk_wmem_free_skb(sk, skb);
1230 sk_mem_reclaim(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001231}
1232
1233static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
1234{
David S. Millercd07a8e2008-09-23 00:50:13 -07001235 return skb_peek(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001236}
1237
1238static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
1239{
David S. Millercd07a8e2008-09-23 00:50:13 -07001240 return skb_peek_tail(&sk->sk_write_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001241}
1242
1243static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
1244{
David S. Millercd07a8e2008-09-23 00:50:13 -07001245 return skb_queue_next(&sk->sk_write_queue, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001246}
1247
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001248static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
1249{
1250 return skb_queue_prev(&sk->sk_write_queue, skb);
1251}
1252
David S. Millerfe067e82007-03-07 12:12:44 -08001253#define tcp_for_write_queue(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001254 skb_queue_walk(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001255
1256#define tcp_for_write_queue_from(skb, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001257 skb_queue_walk_from(&(sk)->sk_write_queue, skb)
David S. Millerfe067e82007-03-07 12:12:44 -08001258
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001259#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
David S. Millercd07a8e2008-09-23 00:50:13 -07001260 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001261
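/*
 * Example (illustrative, not part of the original header): counting
 * SACKed skbs with the walk macro above.  Relies on TCP_SKB_CB() and
 * TCPCB_SACKED_ACKED defined earlier in this header;
 * example_count_sacked() is a hypothetical helper.
 */
static inline unsigned int example_count_sacked(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned int sacked = 0;

	tcp_for_write_queue(skb, sk) {
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			sacked++;
	}
	return sacked;
}
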
Damian Lukowski5152fc72009-09-01 10:24:00 +00001262/* This function calculates a "timeout" which is equivalent to the timeout of a
1263 * TCP connection after "boundary" unsuccessful, exponentially backed-off
1264 * retransmissions with an initial RTO of TCP_RTO_MIN.
1265 */
Damian Lukowski6fa12c82009-08-26 00:16:34 +00001266static inline bool retransmits_timed_out(const struct sock *sk,
1267 unsigned int boundary)
1268{
Damian Lukowski5152fc72009-09-01 10:24:00 +00001269 unsigned int timeout, linear_backoff_thresh;
1270
Damian Lukowski6fa12c82009-08-26 00:16:34 +00001271 if (!inet_csk(sk)->icsk_retransmits)
1272 return false;
1273
Damian Lukowski5152fc72009-09-01 10:24:00 +00001274 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
Damian Lukowski6fa12c82009-08-26 00:16:34 +00001275
Damian Lukowski5152fc72009-09-01 10:24:00 +00001276 if (boundary <= linear_backoff_thresh)
1277 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
Damian Lukowski6fa12c82009-08-26 00:16:34 +00001278 else
Damian Lukowski5152fc72009-09-01 10:24:00 +00001279 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
1280 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
Damian Lukowski6fa12c82009-08-26 00:16:34 +00001281
Damian Lukowski5152fc72009-09-01 10:24:00 +00001282 return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
Damian Lukowski6fa12c82009-08-26 00:16:34 +00001283}
1284
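/*
 * Worked example (editorial; assumes the usual TCP_RTO_MIN = HZ/5 and
 * TCP_RTO_MAX = 120*HZ from earlier in this header):
 *
 *	linear_backoff_thresh = ilog2(120*HZ / (HZ/5)) = ilog2(600) = 9
 *
 * For boundary = 3 (below the threshold):
 *
 *	timeout = ((2 << 3) - 1) * TCP_RTO_MIN
 *	        = 15 * 200ms = 3s
 *
 * i.e. the sum 200 + 400 + 800 + 1600 ms of exponentially doubling RTOs.
 * Beyond the threshold, each further retransmission adds TCP_RTO_MAX.
 */
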
David S. Millerfe067e82007-03-07 12:12:44 -08001285static inline struct sk_buff *tcp_send_head(struct sock *sk)
1286{
1287 return sk->sk_send_head;
1288}
1289
David S. Millercd07a8e2008-09-23 00:50:13 -07001290static inline bool tcp_skb_is_last(const struct sock *sk,
1291 const struct sk_buff *skb)
1292{
1293 return skb_queue_is_last(&sk->sk_write_queue, skb);
1294}
1295
David S. Millerfe067e82007-03-07 12:12:44 -08001296static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
1297{
David S. Millercd07a8e2008-09-23 00:50:13 -07001298 if (tcp_skb_is_last(sk, skb))
David S. Millerfe067e82007-03-07 12:12:44 -08001299 sk->sk_send_head = NULL;
David S. Millercd07a8e2008-09-23 00:50:13 -07001300 else
1301 sk->sk_send_head = tcp_write_queue_next(sk, skb);
David S. Millerfe067e82007-03-07 12:12:44 -08001302}
1303
1304static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1305{
1306 if (sk->sk_send_head == skb_unlinked)
1307 sk->sk_send_head = NULL;
1308}
1309
1310static inline void tcp_init_send_head(struct sock *sk)
1311{
1312 sk->sk_send_head = NULL;
1313}
1314
1315static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1316{
1317 __skb_queue_tail(&sk->sk_write_queue, skb);
1318}
1319
1320static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1321{
1322 __tcp_add_write_queue_tail(sk, skb);
1323
1324 /* Queue it, remembering where we must start sending. */
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001325 if (sk->sk_send_head == NULL) {
David S. Millerfe067e82007-03-07 12:12:44 -08001326 sk->sk_send_head = skb;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001327
1328 if (tcp_sk(sk)->highest_sack == NULL)
1329 tcp_sk(sk)->highest_sack = skb;
1330 }
David S. Millerfe067e82007-03-07 12:12:44 -08001331}
1332
1333static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1334{
1335 __skb_queue_head(&sk->sk_write_queue, skb);
1336}
1337
1338/* Insert buff after skb on the write queue of sk. */
1339static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1340 struct sk_buff *buff,
1341 struct sock *sk)
1342{
Gerrit Renker7de6c032008-04-14 00:05:09 -07001343 __skb_queue_after(&sk->sk_write_queue, skb, buff);
David S. Millerfe067e82007-03-07 12:12:44 -08001344}
1345
David S. Miller43f59c82008-09-21 21:28:51 -07001346/* Insert new before skb on the write queue of sk. */
David S. Millerfe067e82007-03-07 12:12:44 -08001347static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1348 struct sk_buff *skb,
1349 struct sock *sk)
1350{
David S. Miller43f59c82008-09-21 21:28:51 -07001351 __skb_queue_before(&sk->sk_write_queue, skb, new);
Ilpo Järvinen6e421412007-11-19 23:24:09 -08001352
1353 if (sk->sk_send_head == skb)
1354 sk->sk_send_head = new;
David S. Millerfe067e82007-03-07 12:12:44 -08001355}
1356
1357static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1358{
1359 __skb_unlink(skb, &sk->sk_write_queue);
1360}
1361
David S. Millerfe067e82007-03-07 12:12:44 -08001362static inline int tcp_write_queue_empty(struct sock *sk)
1363{
1364 return skb_queue_empty(&sk->sk_write_queue);
1365}
1366
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001367/* Start sequence of the highest skb with SACKed bit, valid only if
1368 * sacked > 0 or when the caller has itself ensured validity.
1369 */
1370static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1371{
1372 if (!tp->sacked_out)
1373 return tp->snd_una;
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001374
1375 if (tp->highest_sack == NULL)
1376 return tp->snd_nxt;
1377
Ilpo Järvinena47e5a92007-11-15 19:41:46 -08001378 return TCP_SKB_CB(tp->highest_sack)->seq;
1379}
1380
Ilpo Järvinen6859d492007-12-02 00:48:06 +02001381static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1382{
1383 tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1384 tcp_write_queue_next(sk, skb);
1385}
1386
1387static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1388{
1389 return tcp_sk(sk)->highest_sack;
1390}
1391
1392static inline void tcp_highest_sack_reset(struct sock *sk)
1393{
1394 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1395}
1396
1397/* Called when old skb is about to be deleted (to be combined with new skb) */
1398static inline void tcp_highest_sack_combine(struct sock *sk,
1399 struct sk_buff *old,
1400 struct sk_buff *new)
1401{
1402 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1403 tcp_sk(sk)->highest_sack = new;
1404}
1405
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406/* /proc */
1407enum tcp_seq_states {
1408 TCP_SEQ_STATE_LISTENING,
1409 TCP_SEQ_STATE_OPENREQ,
1410 TCP_SEQ_STATE_ESTABLISHED,
1411 TCP_SEQ_STATE_TIME_WAIT,
1412};
1413
1414struct tcp_seq_afinfo {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 char *name;
1416 sa_family_t family;
Denis V. Lunev68fcadd2008-04-13 22:13:30 -07001417 struct file_operations seq_fops;
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001418 struct seq_operations seq_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419};
1420
1421struct tcp_iter_state {
Denis V. Luneva4146b12008-04-13 22:11:14 -07001422 struct seq_net_private p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 sa_family_t family;
1424 enum tcp_seq_states state;
1425 struct sock *syn_wait_sk;
1426 int bucket, sbucket, num, uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427};
1428
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001429extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1430extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001432extern struct request_sock_ops tcp_request_sock_ops;
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001433extern struct request_sock_ops tcp6_request_sock_ops;
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001434
Brian Haley7d06b2e2008-06-14 17:04:49 -07001435extern void tcp_v4_destroy_sock(struct sock *sk);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001436
Herbert Xua430a432006-07-08 13:34:56 -07001437extern int tcp_v4_gso_send_check(struct sk_buff *skb);
Herbert Xu576a30e2006-06-27 13:22:38 -07001438extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
Herbert Xubf296b12008-12-15 23:43:36 -08001439extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
1440 struct sk_buff *skb);
1441extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
1442 struct sk_buff *skb);
1443extern int tcp_gro_complete(struct sk_buff *skb);
1444extern int tcp4_gro_complete(struct sk_buff *skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07001445
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001446#ifdef CONFIG_PROC_FS
1447extern int tcp4_proc_init(void);
1448extern void tcp4_proc_exit(void);
1449#endif
1450
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001451/* TCP af-specific functions */
1452struct tcp_sock_af_ops {
1453#ifdef CONFIG_TCP_MD5SIG
1454 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
1455 struct sock *addr_sk);
1456 int (*calc_md5_hash) (char *location,
1457 struct tcp_md5sig_key *md5,
1458 struct sock *sk,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001459 struct request_sock *req,
Adam Langley49a72df2008-07-19 00:01:42 -07001460 struct sk_buff *skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001461 int (*md5_add) (struct sock *sk,
1462 struct sock *addr_sk,
1463 u8 *newkey,
1464 u8 len);
1465 int (*md5_parse) (struct sock *sk,
1466 char __user *optval,
1467 int optlen);
1468#endif
1469};
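
/*
 * Illustrative (assumed) shape of an af-specific ops table; the real
 * IPv4 instance lives in net/ipv4/tcp_ipv4.c and also wires up
 * .md5_add/.md5_parse, omitted here.  example_ipv4_af_ops is hypothetical.
 */
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops example_ipv4_af_ops = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
};
#endif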
1470
1471struct tcp_request_sock_ops {
1472#ifdef CONFIG_TCP_MD5SIG
1473 struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
1474 struct request_sock *req);
John Dykstrae3afe7b2009-07-16 05:04:51 +00001475 int (*calc_md5_hash) (char *location,
1476 struct tcp_md5sig_key *md5,
1477 struct sock *sk,
1478 struct request_sock *req,
1479 struct sk_buff *skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001480#endif
1481};
1482
Denis V. Lunev9b0f9762008-02-29 11:13:15 -08001483extern void tcp_v4_init(void);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001484extern void tcp_init(void);
1485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486#endif /* _TCP_H */