/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/


#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */


#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */


#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/
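
/*
 * Illustrative arithmetic (not part of this header): all of the timer
 * macros above are expressed in jiffies, so their real-time value depends
 * on HZ.  With the common HZ=1000 configuration they work out to
 *
 *	TCP_DELACK_MAX   = 1000/5  = 200 jiffies ~ 200 ms
 *	TCP_DELACK_MIN   = 1000/25 =  40 jiffies ~  40 ms
 *	TCP_RTO_MIN      = 1000/5  = 200 jiffies ~ 200 ms
 *	TCP_RTO_MAX      = 120*1000            ~ 120 s
 *	TCP_TIMEOUT_INIT = 3*1000              ~   3 s
 */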

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
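
/*
 * Illustrative arithmetic (not a definition from this header):
 * MAX_TCP_OPTION_SPACE leaves 40 bytes for options in the TCP header.  A
 * segment that carries timestamps consumes TCPOLEN_TSTAMP_ALIGNED = 12 of
 * them, leaving 28 bytes for SACK: TCPOLEN_SACK_BASE_ALIGNED (4) plus three
 * TCPOLEN_SACK_PERBLOCK (8) blocks, 4 + 3*8 = 28.  This is why a receiver
 * using timestamps typically reports at most three SACK blocks per ACK.
 */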

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
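
/*
 * Illustrative example (not part of this header): these helpers rely on
 * two's-complement wraparound, so they keep working across sequence number
 * rollover.  With
 *
 *	__u32 snd_una = 0xfffffff0;	close to wrapping
 *	__u32 ack     = 0x00000010;	already wrapped past zero
 *
 * before(snd_una, ack) is true, because (__s32)(0xfffffff0 - 0x00000010)
 * evaluates to -32 even though snd_una is numerically larger than ack.
 */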

static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val)	SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk,
		     int cmd,
		     unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk,
				 struct sk_buff *skb,
				 struct tcphdr *th,
				 unsigned len);

extern int tcp_rcv_established(struct sock *sk,
			       struct sk_buff *skb,
			       struct tcphdr *th,
			       unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern void tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int tcp_twsk_unique(struct sock *sk,
			   struct sock *sktw, void *twp);

extern void tcp_twsk_destructor(struct sock *sk);

extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}

enum tcp_tw_status
{
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock * tcp_check_req(struct sock *sk, struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent,
			     struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk,
		      long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level,
			  int optname,
			  char __user *optval,
			  int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level,
			  int optname, char __user *optval,
			  int optlen);
extern int compat_tcp_getsockopt(struct sock *sk,
				 int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk,
				 int level, int optname,
				 char __user *optval, int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg,
		       size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab);

extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk,
			       struct sk_buff *skb);

extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);

extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk,
			 struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk,
			  struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff * tcp_make_synack(struct sock *sk,
					struct dst_entry *dst,
					struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);


/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
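
/*
 * Illustrative example (numbers made up): if the peer has never advertised
 * a window larger than tp->max_window = 10000 bytes, a 9000-byte TSO chunk
 * passed to tcp_bound_to_half_wnd() is clamped to max(5000, 68 - hdrlen),
 * i.e. 5000 bytes, so a single packet never consumes more than half of the
 * best window the peer has ever offered.
 */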

/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
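
/*
 * Illustrative sketch (hypothetical module, not part of this header): a
 * minimal congestion control handler can be built from the Reno helpers
 * declared above and registered from module init code, e.g.
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 * Only .ssthresh and .cong_avoid are required; the other hooks are
 * optional, as noted in struct tcp_congestion_ops.
 */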

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
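
/*
 * Illustrative example (numbers made up): with packets_out = 10,
 * sacked_out = 3, lost_out = 2 and retrans_out = 1,
 * tcp_packets_in_flight() returns 10 - (3 + 2) + 1 = 6: of the ten
 * segments sent, five are assumed to have left the network (SACKed or
 * declared lost) and one retransmission has been added back in.
 */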

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}


/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				--ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_poll(sk->sk_sleep,
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}


#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
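
/*
 * Illustrative arithmetic (not part of this header): with
 * sysctl_tcp_adv_win_scale = 2 and an unused sk_rcvbuf of 87380 bytes,
 * tcp_win_from_space(87380) = 87380 - (87380 >> 2) = 65535, i.e. a quarter
 * of the receive buffer is reserved for application/metadata overhead and
 * the remainder may be advertised as window.
 */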

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}

extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
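
/*
 * Illustrative arithmetic (not part of this header): tcp_fin_time() never
 * lets the FIN-WAIT-2 timeout drop below (rto << 2) - (rto >> 1), i.e.
 * 3.5 * RTO.  With icsk_rto equivalent to 200 ms that floor is 700 ms, so
 * even a very small tcp_fin_timeout sysctl still leaves time for the final
 * FIN exchange to be retransmitted.
 */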

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}
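
/*
 * Illustrative example (numbers made up): if the last timestamp accepted
 * from the peer was ts_recent = 1000 and a new segment carries
 * rcv_tsval = 997, then (s32)(1000 - 997) = 3 is larger than paws_win = 0,
 * the 24-day aging test also fails, and tcp_paws_check() returns 0 - the
 * segment looks like an old duplicate.  With paws_win = TCP_PAWS_WINDOW (1)
 * a value of rcv_tsval = 999 would still pass, since the difference is
 * exactly 1.
 */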

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all the implementations
	   followed the draft about maintaining clock monotonicity across
	   reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)


static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */

/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash,
			       struct tcp_md5sig_key *key,
			       struct sock *sk,
			       struct request_sock *req,
			       struct sk_buff *skb);

extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

extern int tcp_v4_md5_do_add(struct sock *sk,
			     __be32 addr,
			     u8 *newkey,
			     u8 newkeylen);

extern int tcp_v4_md5_do_del(struct sock *sk,
			     __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		\
				 &(struct tcp_md5sig_key) {		\
					.key = (twsk)->tw_md5_key,	\
					.keylen = (twsk)->tw_md5_keylen, \
				} : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif

extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}


/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

Ilpo Järvinen234b6862007-12-02 00:48:02 +02001254
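/*
 * Illustrative walk over the already-transmitted part of the write queue
 * (a sketch only): everything before tcp_send_head() (defined below) has
 * been sent at least once, so walkers normally stop there, much as
 * tcp_xmit_retransmit_queue() does:
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		... inspect an already-transmitted skb ...
 *	}
 */
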
/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static inline bool retransmits_timed_out(const struct sock *sk,
					 unsigned int boundary)
{
	unsigned int timeout, linear_backoff_thresh;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);

	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
			  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;

	return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
}

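/*
 * Worked example (assuming the usual TCP_RTO_MIN = HZ/5 and
 * TCP_RTO_MAX = 120*HZ defined earlier in this header): the RTO doubles
 * from 200ms until it is clamped at 120s, so linear_backoff_thresh =
 * ilog2(600) = 9.  With boundary = 15 (the default net.ipv4.tcp_retries2)
 * the threshold becomes
 *
 *	((2 << 9) - 1) * 200ms + (15 - 9) * 120s
 *	= 1023 * 200ms + 6 * 120s
 *	= 204.6s + 720s ~= 924.6s,
 *
 * i.e. just over 15 minutes of unacknowledged retransmissions.
 */
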
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

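/*
 * sk->sk_send_head marks the first skb on the write queue that has not yet
 * been transmitted; it is NULL once everything queued has been sent at least
 * once.  The helpers below maintain that invariant as skbs are queued, sent
 * and unlinked.
 */
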
static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

/* Start sequence of the highest skb with SACKed bit, valid only if
 * sacked > 0 or when the caller has ensured validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

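/*
 * As the code above shows, tcp_highest_sack_seq() falls back to snd_una when
 * nothing is currently SACKed and to snd_nxt when the tracked skb has run off
 * the end of the queue; otherwise it reports the start sequence of the skb
 * held in tp->highest_sack.  The helpers below keep that marker up to date.
 */
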
static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};

extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

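/*
 * Sketch of how an address family registers its /proc/net seq file (names
 * mirror the IPv4 code in net/ipv4/tcp_ipv4.c but are illustrative here):
 *
 *	static struct tcp_seq_afinfo tcp4_seq_afinfo = {
 *		.name		= "tcp",
 *		.family		= AF_INET,
 *		.seq_ops	= {
 *			.show	= tcp4_seq_show,
 *		},
 *	};
 *
 *	err = tcp_proc_register(net, &tcp4_seq_afinfo);
 */
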
extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

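/*
 * Each address family supplies one instance of these ops; as a rough sketch
 * (field values mirror what net/ipv4/tcp_ipv4.c does, but treat the exact
 * helper names as illustrative):
 *
 *	static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
 *		.md5_lookup	= tcp_v4_md5_lookup,
 *		.calc_md5_hash	= tcp_v4_md5_hash_skb,
 *		.md5_add	= tcp_v4_md5_add_func,
 *		.md5_parse	= tcp_v4_parse_md5_keys,
 *	};
 */
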
struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
#endif
};

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */