/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller	:	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul	:	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

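/* MTU probing, a summary sketch (not from the original source): 0
 * disables probing, 1 enables it only once an ICMP black hole has been
 * detected by the retransmit timer, and 2 enables it unconditionally,
 * starting the search from sysctl_tcp_base_mss.
 */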
int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = 512;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int prior_packets = tp->packets_out;

        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

        /* Don't override Nagle indefinitely with F-RTO */
        if (tp->frto_counter == 2)
                tp->frto_counter = 3;

        tp->packets_out += tcp_skb_pcount(skb);
        if (!prior_packets)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!before(tcp_wnd_end(tp), tp->snd_nxt))
                return tp->snd_nxt;
        else
                return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; instead we advertise an MSS calculated from the
 *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        int mss = tp->advmss;

        if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
                mss = dst_metric(dst, RTAX_ADVMSS);
                tp->advmss = mss;
        }

        return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta = tcp_time_stamp - tp->lsndtime;
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;

        tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

        tp->snd_ssthresh = tcp_current_ssthresh(sk);
        restart_cwnd = min(restart_cwnd, cwnd);

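        /* Halve cwnd once for each full RTO the connection sat idle,
         * but never drop below the restart window.
         */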
        while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        tp->snd_cwnd = max(cwnd, restart_cwnd);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->snd_cwnd_used = 0;
}

static void tcp_event_data_sent(struct tcp_sock *tp,
                                struct sk_buff *skb, struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;

        if (sysctl_tcp_slow_start_after_idle &&
            (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
                tcp_cwnd_restart(sk, __sk_dst_get(sk));

        tp->lsndtime = now;

        /* If this is a reply sent within ato of the last received
         * packet, enter pingpong mode.
         */
        if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
                icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
                               __u32 *rcv_wnd, __u32 *window_clamp,
                               int wscale_ok, __u8 *rcv_wscale)
{
        unsigned int space = (__space < 0 ? 0 : __space);

        /* If no clamp set the clamp to the max possible scaled window */
        if (*window_clamp == 0)
                (*window_clamp) = (65535 << 14);
        space = min(*window_clamp, space);

        /* Quantize space offering to a multiple of mss if possible. */
        if (space > mss)
                space = (space / mss) * mss;

        /* NOTE: offering an initial window larger than 32767
         * will break some buggy TCP stacks. If the admin tells us
         * it is likely we could be speaking with such a buggy stack
         * we will truncate our initial window offering to 32K-1
         * unless the remote has sent us a window scaling option,
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
        if (sysctl_tcp_workaround_signed_windows)
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = space;

        (*rcv_wscale) = 0;
        if (wscale_ok) {
                /* Set window scaling on max possible window
                 * See RFC1323 for an explanation of the limit to 14
                 */
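                /* Worked example (a sketch, not from the original source):
                 * a 4 MB limit must be halved seven times before it fits
                 * in 16 bits (4194304 >> 7 == 32768), so rcv_wscale ends
                 * up as 7.
                 */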
                space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                while (space > 65535 && (*rcv_wscale) < 14) {
                        space >>= 1;
                        (*rcv_wscale)++;
                }
        }

        /* Set the initial window to a value large enough for senders
         * following RFC2414. Senders not following this RFC will be
         * satisfied with 2.
         */
        if (mss > (1 << *rcv_wscale)) {
                int init_cwnd = 4;
                if (mss > 1460 * 3)
                        init_cwnd = 2;
                else if (mss > 1460)
                        init_cwnd = 3;
                if (*rcv_wnd > init_cwnd * mss)
                        *rcv_wnd = init_cwnd * mss;
        }

        /* Set the clamp no higher than max representable value */
        (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 cur_win = tcp_receive_window(tp);
        u32 new_win = __tcp_select_window(sk);

        /* Never shrink the offered window */
        if (new_win < cur_win) {
                /* Danger Will Robinson!
                 * Don't update rcv_wup/rcv_wnd here or else
                 * we will not be able to advertise a zero
                 * window in time.  --DaveM
                 *
                 * Relax Will Robinson.
                 */
                new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
        }
        tp->rcv_wnd = new_win;
        tp->rcv_wup = tp->rcv_nxt;

        /* Make sure we do not exceed the maximum possible
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

        /* RFC1323 scaling applied */
        new_win >>= tp->rx_opt.rcv_wscale;

        /* If we advertise zero window, disable fast path. */
        if (new_win == 0)
                tp->pred_flags = 0;

        return new_win;
}

static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
        if (!(tp->ecn_flags & TCP_ECN_OK))
                TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}

static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tp->ecn_flags = 0;
        if (sysctl_tcp_ecn) {
                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
                tp->ecn_flags = TCP_ECN_OK;
        }
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
        if (inet_rsk(req)->ecn_ok)
                th->ece = 1;
}

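/* Per-segment ECN handling on transmit (a descriptive note, derived from
 * the code below): fresh data segments are sent ECT and carry CWR once
 * when a CWR has been queued by the receive path; pure ACKs and
 * retransmissions are sent without ECT, and ECE is echoed while a CWR
 * is being demanded from the peer.
 */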
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
                                int tcp_header_len)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->ecn_flags & TCP_ECN_OK) {
                /* Not-retransmitted data segment: set ECT and inject CWR. */
                if (skb->len != tcp_header_len &&
                    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
                        INET_ECN_xmit(sk);
                        if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
                                tcp_hdr(skb)->cwr = 1;
                                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
                        }
                } else {
                        /* ACK or retransmitted segment: clear ECT|CE */
                        INET_ECN_dontxmit(sk);
                }
                if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
                        tcp_hdr(skb)->ece = 1;
        }
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
        skb->csum = 0;

        TCP_SKB_CB(skb)->flags = flags;
        TCP_SKB_CB(skb)->sacked = 0;

        skb_shinfo(skb)->gso_segs = 1;
        skb_shinfo(skb)->gso_size = 0;
        skb_shinfo(skb)->gso_type = 0;

        TCP_SKB_CB(skb)->seq = seq;
        if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
                seq++;
        TCP_SKB_CB(skb)->end_seq = seq;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)

struct tcp_out_options {
        u8 options;		/* bit field of OPTION_* */
        u8 ws;			/* window scale, 0 to disable */
        u8 num_sack_blocks;	/* number of SACK blocks to include */
        u16 mss;		/* 0 to disable */
        __u32 tsval, tsecr;	/* need to include OPTION_TS */
};

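/* Write previously computed TCP options to the packet (a descriptive
 * note, derived from the code below): each option is emitted as whole
 * 32-bit words, padded with NOPs where needed, which is why the
 * *_ALIGNED lengths are used when the options are sized.
 */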
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
                              const struct tcp_out_options *opts,
                              __u8 **md5_hash) {
        if (unlikely(OPTION_MD5 & opts->options)) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) |
                               TCPOLEN_MD5SIG);
                *md5_hash = (__u8 *)ptr;
                ptr += 4;
        } else {
                *md5_hash = NULL;
        }

        if (likely(OPTION_TS & opts->options)) {
                if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) {
                        *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
                                       (TCPOLEN_SACK_PERM << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                } else {
                        *ptr++ = htonl((TCPOPT_NOP << 24) |
                                       (TCPOPT_NOP << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                }
                *ptr++ = htonl(opts->tsval);
                *ptr++ = htonl(opts->tsecr);
        }

        if (unlikely(opts->mss)) {
                *ptr++ = htonl((TCPOPT_MSS << 24) |
                               (TCPOLEN_MSS << 16) |
                               opts->mss);
        }

        if (unlikely(OPTION_SACK_ADVERTISE & opts->options &&
                     !(OPTION_TS & opts->options))) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK_PERM << 8) |
                               TCPOLEN_SACK_PERM);
        }

        if (unlikely(opts->ws)) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_WINDOW << 16) |
                               (TCPOLEN_WINDOW << 8) |
                               opts->ws);
        }

        if (unlikely(opts->num_sack_blocks)) {
                struct tcp_sack_block *sp = tp->rx_opt.dsack ?
                        tp->duplicate_sack : tp->selective_acks;
                int this_sack;

                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK << 8) |
                               (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
                                                     TCPOLEN_SACK_PERBLOCK)));

                for (this_sack = 0; this_sack < opts->num_sack_blocks;
                     ++this_sack) {
                        *ptr++ = htonl(sp[this_sack].start_seq);
                        *ptr++ = htonl(sp[this_sack].end_seq);
                }

                if (tp->rx_opt.dsack) {
                        tp->rx_opt.dsack = 0;
                        tp->rx_opt.eff_sacks--;
                }
        }
}

static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
                                struct tcp_out_options *opts,
                                struct tcp_md5sig_key **md5) {
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned size = 0;

#ifdef CONFIG_TCP_MD5SIG
        *md5 = tp->af_specific->md5_lookup(sk, sk);
        if (*md5) {
                opts->options |= OPTION_MD5;
                size += TCPOLEN_MD5SIG_ALIGNED;
        }
#else
        *md5 = NULL;
#endif

        /* We always get an MSS option. The option bytes which will be
         * seen in normal data packets, should timestamps be used, must
         * be included in the advertised MSS. But we subtract them from
         * tp->mss_cache so that calculations in tcp_sendmsg are simpler
         * etc. So account for this fact here if necessary. If we don't
         * do this correctly, as a receiver we won't recognize data
         * packets as being full sized when we should, and thus we won't
         * abide by the delayed ACK rules correctly.
         * SACKs don't matter, we never delay an ACK when we have any of
         * those going out.
         */
        opts->mss = tcp_advertise_mss(sk);
        size += TCPOLEN_MSS_ALIGNED;

        if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
                opts->options |= OPTION_TS;
                opts->tsval = TCP_SKB_CB(skb)->when;
                opts->tsecr = tp->rx_opt.ts_recent;
                size += TCPOLEN_TSTAMP_ALIGNED;
        }
        if (likely(sysctl_tcp_window_scaling)) {
                opts->ws = tp->rx_opt.rcv_wscale;
                if (likely(opts->ws))
                        size += TCPOLEN_WSCALE_ALIGNED;
        }
        if (likely(sysctl_tcp_sack)) {
                opts->options |= OPTION_SACK_ADVERTISE;
                if (unlikely(!(OPTION_TS & opts->options)))
                        size += TCPOLEN_SACKPERM_ALIGNED;
        }

        return size;
}

static unsigned tcp_synack_options(struct sock *sk,
                                   struct request_sock *req,
                                   unsigned mss, struct sk_buff *skb,
                                   struct tcp_out_options *opts,
                                   struct tcp_md5sig_key **md5) {
        unsigned size = 0;
        struct inet_request_sock *ireq = inet_rsk(req);
        char doing_ts;

#ifdef CONFIG_TCP_MD5SIG
        *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
        if (*md5) {
                opts->options |= OPTION_MD5;
                size += TCPOLEN_MD5SIG_ALIGNED;
        }
#else
        *md5 = NULL;
#endif

        /* We can't fit any SACK blocks in a packet with MD5 + TS
         * options.  There was discussion about disabling SACK rather
         * than TS in order to fit in better with old, buggy kernels,
         * but that was deemed to be unnecessary.
         */
        doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok);

        opts->mss = mss;
        size += TCPOLEN_MSS_ALIGNED;

        if (likely(ireq->wscale_ok)) {
                opts->ws = ireq->rcv_wscale;
                if (likely(opts->ws))
                        size += TCPOLEN_WSCALE_ALIGNED;
        }
        if (likely(doing_ts)) {
                opts->options |= OPTION_TS;
                opts->tsval = TCP_SKB_CB(skb)->when;
                opts->tsecr = req->ts_recent;
                size += TCPOLEN_TSTAMP_ALIGNED;
        }
        if (likely(ireq->sack_ok)) {
                opts->options |= OPTION_SACK_ADVERTISE;
                if (unlikely(!doing_ts))
                        size += TCPOLEN_SACKPERM_ALIGNED;
        }

        return size;
}

static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
                                        struct tcp_out_options *opts,
                                        struct tcp_md5sig_key **md5) {
        struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned size = 0;

#ifdef CONFIG_TCP_MD5SIG
        *md5 = tp->af_specific->md5_lookup(sk, sk);
        if (unlikely(*md5)) {
                opts->options |= OPTION_MD5;
                size += TCPOLEN_MD5SIG_ALIGNED;
        }
#else
        *md5 = NULL;
#endif

        if (likely(tp->rx_opt.tstamp_ok)) {
                opts->options |= OPTION_TS;
                opts->tsval = tcb ? tcb->when : 0;
                opts->tsecr = tp->rx_opt.ts_recent;
                size += TCPOLEN_TSTAMP_ALIGNED;
        }

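        /* Worked example (a sketch, assuming the standard option sizes):
         * with timestamps in use, size is 12 here, so remaining is
         * 40 - 12 = 28 bytes; after the 4-byte aligned SACK header,
         * (28 - 4) / 8 leaves room for at most 3 SACK blocks.
         */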
        if (unlikely(tp->rx_opt.eff_sacks)) {
                const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
                opts->num_sack_blocks =
                        min_t(unsigned, tp->rx_opt.eff_sacks,
                              (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
                              TCPOLEN_SACK_PERBLOCK);
                size += TCPOLEN_SACK_BASE_ALIGNED +
                        opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
        }

        return size;
}

/* This routine actually transmits TCP packets queued up by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                            gfp_t gfp_mask)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
        struct tcp_sock *tp;
        struct tcp_skb_cb *tcb;
        struct tcp_out_options opts;
        unsigned tcp_options_size, tcp_header_size;
        struct tcp_md5sig_key *md5;
        __u8 *md5_hash_location;
        struct tcphdr *th;
        int err;

        BUG_ON(!skb || !tcp_skb_pcount(skb));

        /* If congestion control is doing timestamping, we must
         * take such a timestamp before we potentially clone/copy.
         */
        if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
                __net_timestamp(skb);

        if (likely(clone_it)) {
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
                else
                        skb = skb_clone(skb, gfp_mask);
                if (unlikely(!skb))
                        return -ENOBUFS;
        }

        inet = inet_sk(sk);
        tp = tcp_sk(sk);
        tcb = TCP_SKB_CB(skb);
        memset(&opts, 0, sizeof(opts));

        if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
                tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
        else
                tcp_options_size = tcp_established_options(sk, skb, &opts,
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

        if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);

        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
        skb_set_owner_w(skb, sk);

        /* Build TCP header and checksum it. */
        th = tcp_hdr(skb);
        th->source = inet->sport;
        th->dest = inet->dport;
        th->seq = htonl(tcb->seq);
        th->ack_seq = htonl(tp->rcv_nxt);
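        /* The 7th 16-bit word of the header holds the 4-bit data offset,
         * the reserved bits and the flag bits; build it in one store.
         */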
        *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
                                      tcb->flags);

        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                /* RFC1323: The window in SYN & SYN/ACK segments
                 * is never scaled.
                 */
                th->window = htons(min(tp->rcv_wnd, 65535U));
        } else {
                th->window = htons(tcp_select_window(sk));
        }
        th->check = 0;
        th->urg_ptr = 0;

        if (unlikely(tp->urg_mode &&
                     between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
                th->urg_ptr = htons(tp->snd_up - tcb->seq);
                th->urg = 1;
        }

        tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
        if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
                TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
        /* Calculate the MD5 hash, as we have all we need now */
        if (md5) {
                sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
                tp->af_specific->calc_md5_hash(md5_hash_location,
                                               md5, sk, NULL, skb);
        }
#endif

        icsk->icsk_af_ops->send_check(sk, skb->len, skb);

        if (likely(tcb->flags & TCPCB_FLAG_ACK))
                tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

        if (skb->len != tcp_header_size)
                tcp_event_data_sent(tp, skb, sk);

        if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
                TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);

        err = icsk->icsk_af_ops->queue_xmit(skb, 0);
        if (likely(err <= 0))
                return err;

        tcp_enter_cwr(sk, 1);

        return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* Advance write_seq and place onto the write_queue. */
        tp->write_seq = TCP_SKB_CB(skb)->end_seq;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);
}

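/* Set the TSO segment accounting on an skb (a descriptive note, derived
 * from the code below): for example, a 4380-byte skb at an mss of 1460
 * is marked as gso_segs = DIV_ROUND_UP(4380, 1460) = 3 segments.
 */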
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
{
        if (skb->len <= mss_now || !sk_can_gso(sk)) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
                skb_shinfo(skb)->gso_segs = 1;
                skb_shinfo(skb)->gso_size = 0;
                skb_shinfo(skb)->gso_type = 0;
        } else {
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
                skb_shinfo(skb)->gso_size = mss_now;
                skb_shinfo(skb)->gso_type = sk->sk_gso_type;
        }
}

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
                                   int decr)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->sacked_out || tcp_is_reno(tp))
                return;

        if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
                tp->fackets_out -= decr;
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
                 unsigned int mss_now)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int nsize, old_factor;
        int nlen;
        u16 flags;

        BUG_ON(len > skb->len);

        tcp_clear_all_retrans_hints(tp);
        nsize = skb_headlen(skb) - len;
        if (nsize < 0)
                nsize = 0;

        if (skb_cloned(skb) &&
            skb_is_nonlinear(skb) &&
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;

        /* Get a new skb... force flag on. */
        buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
        if (buff == NULL)
                return -ENOMEM; /* We'll just try again later. */

        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        nlen = skb->len - len - nsize;
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Correct the sequence numbers. */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
        TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
        TCP_SKB_CB(buff)->flags = flags;
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

        if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Copy and checksum data tail into the new buffer. */
                buff->csum = csum_partial_copy_nocheck(skb->data + len,
                                                       skb_put(buff, nsize),
                                                       nsize, 0);

                skb_trim(skb, len);

                skb->csum = csum_block_sub(skb->csum, buff->csum, len);
        } else {
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb_split(skb, buff, len);
        }

        buff->ip_summed = skb->ip_summed;

        /* Looks stupid, but our code really uses the "when" of skbs
         * it has never sent before. --ANK
         */
        TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
        buff->tstamp = skb->tstamp;

        old_factor = tcp_skb_pcount(skb);

        /* Fix up tso_factor for both original and new SKB. */
        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);

        /* If this packet has been sent out already, we must
         * adjust the various packet counters.
         */
        if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
                int diff = old_factor - tcp_skb_pcount(skb) -
                        tcp_skb_pcount(buff);

                tp->packets_out -= diff;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                        tp->sacked_out -= diff;
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                        tp->retrans_out -= diff;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        tp->lost_out -= diff;

                /* Adjust Reno SACK estimate. */
                if (tcp_is_reno(tp) && diff > 0) {
                        tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
                        tcp_verify_left_out(tp);
                }
                tcp_adjust_fackets_out(sk, skb, diff);
        }

        /* Link BUFF into the send queue. */
        skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);

        return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
        int i, k, eat;

        eat = len;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
                        put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_shinfo(skb)->frags[k].size -= eat;
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb_reset_tail_pointer(skb);
        skb->data_len -= len;
        skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
        if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;

        /* If len == headlen, we avoid __skb_pull to preserve alignment. */
        if (unlikely(len < skb_headlen(skb)))
                __skb_pull(skb, len);
        else
                __pskb_trim_head(skb, len - skb_headlen(skb));

        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb->truesize -= len;
        sk->sk_wmem_queued -= len;
        sk_mem_uncharge(sk, len);
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

        /* Any change of skb->len requires recalculation of tso
         * factor and mss.
         */
        if (tcp_skb_pcount(skb) > 1)
                tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

        return 0;
}

/* Not accounting for SACKs here. */
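/* Worked example (a sketch, assuming plain IPv4 with no IP options):
 * for a path MTU of 1500 this yields 1500 - 20 - 20 = 1460, and the
 * final subtraction below drops it to 1448 when timestamps are in use.
 */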
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mss_now;

        /* Calculate base mss without TCP options:
           It is MMS_S - sizeof(tcphdr) of rfc1122
         */
        mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

        /* Clamp it (mss_clamp does not include tcp options) */
        if (mss_now > tp->rx_opt.mss_clamp)
                mss_now = tp->rx_opt.mss_clamp;

        /* Now subtract optional transport overhead */
        mss_now -= icsk->icsk_ext_hdr_len;

        /* Then reserve room for full set of TCP options and 8 bytes of data */
        if (mss_now < 48)
                mss_now = 48;

        /* Now subtract TCP options size, not including SACKs */
        mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

        return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mtu;

        mtu = mss +
              tp->tcp_header_len +
              icsk->icsk_ext_hdr_len +
              icsk->icsk_af_ops->net_header_len;

        return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
        icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
                                      icsk->icsk_af_ops->net_header_len;
        icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
        icsk->icsk_mtup.probe_size = 0;
}

/* Bound MSS / TSO packet size to half of the window */
static int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
        if (tp->max_window && pktsize > (tp->max_window >> 1))
                return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
        else
                return pktsize;
}

/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT count
   TCP options, but includes only the bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is the minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.

   tp->mss_cache is the current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mss_now;

        if (icsk->icsk_mtup.search_high > pmtu)
                icsk->icsk_mtup.search_high = pmtu;

        mss_now = tcp_mtu_to_mss(sk, pmtu);
        mss_now = tcp_bound_to_half_wnd(tp, mss_now);

        /* And store cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        if (icsk->icsk_mtup.enabled)
                mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
        tp->mss_cache = mss_now;

        return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        u32 mss_now;
        u16 xmit_size_goal;
        int doing_tso = 0;
        unsigned header_len;
        struct tcp_out_options opts;
        struct tcp_md5sig_key *md5;

        mss_now = tp->mss_cache;

        if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
                doing_tso = 1;

        if (dst) {
                u32 mtu = dst_mtu(dst);
                if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
                        mss_now = tcp_sync_mss(sk, mtu);
        }

        header_len = tcp_established_options(sk, NULL, &opts, &md5) +
                     sizeof(struct tcphdr);
        /* The mss_cache is sized based on tp->tcp_header_len, which assumes
         * some common options.  If this is an odd packet (because we have SACK
         * blocks etc) then our calculated header_len will be different, and
         * we have to adjust mss_now correspondingly.
         */
        if (header_len != tp->tcp_header_len) {
                int delta = (int) header_len - tp->tcp_header_len;
                mss_now -= delta;
        }

        xmit_size_goal = mss_now;

        if (doing_tso) {
                xmit_size_goal = ((sk->sk_gso_max_size - 1) -
                                  inet_csk(sk)->icsk_af_ops->net_header_len -
                                  inet_csk(sk)->icsk_ext_hdr_len -
                                  tp->tcp_header_len);

                xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
                xmit_size_goal -= (xmit_size_goal % mss_now);
        }
        tp->xmit_size_goal = xmit_size_goal;

        return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->packets_out >= tp->snd_cwnd) {
                /* Network is fed fully. */
                tp->snd_cwnd_used = 0;
                tp->snd_cwnd_stamp = tcp_time_stamp;
        } else {
                /* Network starves. */
                if (tp->packets_out > tp->snd_cwnd_used)
                        tp->snd_cwnd_used = tp->packets_out;

                if (sysctl_tcp_slow_start_after_idle &&
                    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
                        tcp_cwnd_application_limited(sk);
        }
}

/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
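/* Example (a sketch, not from the original source): with mss_now 1000
 * and cwnd 10, cwnd_len is 10000; if the receiver window allows only
 * 3500 bytes, 3500 - 3500 % 1000 = 3000 is returned so the split stays
 * on an MSS boundary.
 */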
static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
                                        unsigned int mss_now, unsigned int cwnd)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 needed, window, cwnd_len;

        window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
        cwnd_len = mss_now * cwnd;

        if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
                return cwnd_len;

        needed = min(skb->len, window);

        if (cwnd_len <= needed)
                return cwnd_len;

        return needed - needed % mss_now;
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
                                         struct sk_buff *skb)
{
        u32 in_flight, cwnd;

        /* Don't be strict about the congestion window for the final FIN.  */
        if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
            tcp_skb_pcount(skb) == 1)
                return 1;

        in_flight = tcp_packets_in_flight(tp);
        cwnd = tp->snd_cwnd;
        if (in_flight < cwnd)
                return (cwnd - in_flight);

        return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
                             unsigned int mss_now)
{
        int tso_segs = tcp_skb_pcount(skb);

        if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
                tcp_set_skb_tso_segs(sk, skb, mss_now);
                tso_segs = tcp_skb_pcount(skb);
        }
        return tso_segs;
}

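/* Minshall's test (a descriptive note, derived from the code below):
 * true iff the most recently sent small segment (snd_sml) is still
 * unacknowledged, i.e. snd_una < snd_sml <= snd_nxt.
 */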
static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
        return after(tp->snd_sml, tp->snd_una) &&
                !after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline int tcp_nagle_check(const struct tcp_sock *tp,
                                  const struct sk_buff *skb,
                                  unsigned mss_now, int nonagle)
{
        return (skb->len < mss_now &&
                ((nonagle & TCP_NAGLE_CORK) ||
                 (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
                                 unsigned int cur_mss, int nonagle)
{
        /* The Nagle rule does not apply to frames which sit in the middle
         * of the write_queue (they have no chance to get new data).
         *
         * This is implemented in the callers, where they modify the 'nonagle'
         * argument based upon the location of SKB in the send queue.
         */
        if (nonagle & TCP_NAGLE_PUSH)
                return 1;

        /* Don't use the nagle rule for urgent data (or for the final FIN).
         * Nagle can be ignored during F-RTO too (see RFC4138).
         */
        if (tp->urg_mode || (tp->frto_counter == 2) ||
            (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
                return 1;

        if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
                return 1;

        return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
                                   unsigned int cur_mss)
{
        u32 end_seq = TCP_SKB_CB(skb)->end_seq;

        if (skb->len > cur_mss)
                end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

        return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
                                 unsigned int cur_mss, int nonagle)
{
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int cwnd_quota;

        tcp_init_tso_segs(sk, skb, cur_mss);

        if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
                return 0;

        cwnd_quota = tcp_cwnd_test(tp, skb);
        if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
                cwnd_quota = 0;

        return cwnd_quota;
}

int tcp_may_send_now(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb = tcp_send_head(sk);

        return (skb &&
                tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
                             (tcp_skb_is_last(sk, skb) ?
                              tp->nonagle : TCP_NAGLE_PUSH)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
                        unsigned int mss_now)
{
        struct sk_buff *buff;
        int nlen = skb->len - len;
        u16 flags;

        /* All of a TSO frame must be composed of paged data.  */
        if (skb->len != skb->data_len)
                return tcp_fragment(sk, skb, len, mss_now);

        buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
        if (unlikely(buff == NULL))
                return -ENOMEM;

        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Correct the sequence numbers. */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
        TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
        TCP_SKB_CB(buff)->flags = flags;

        /* This packet was never sent out yet, so no SACK bits. */
        TCP_SKB_CB(buff)->sacked = 0;

        buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
        skb_split(skb, buff, len);

        /* Fix up tso_factor for both original and new SKB. */
        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);

        /* Link BUFF into the send queue. */
        skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);

        return 0;
}

1305/* Try to defer sending, if possible, in order to minimize the amount
1306 * of TSO splitting we do. View it as a kind of TSO Nagle test.
1307 *
1308 * This algorithm is from John Heffner.
1309 */
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001310static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001311{
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001312 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001313 const struct inet_connection_sock *icsk = inet_csk(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001314 u32 send_win, cong_win, limit, in_flight;
1315
1316 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
John Heffnerae8064a2006-10-18 20:36:48 -07001317 goto send_now;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001318
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001319 if (icsk->icsk_ca_state != TCP_CA_Open)
John Heffnerae8064a2006-10-18 20:36:48 -07001320 goto send_now;
1321
1322 /* Defer for less than two clock ticks. */
Ilpo Järvinenbd515c32007-12-20 20:36:03 -08001323 if (tp->tso_deferred &&
1324 ((jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
John Heffnerae8064a2006-10-18 20:36:48 -07001325 goto send_now;
David S. Miller908a75c2005-07-05 15:43:58 -07001326
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001327 in_flight = tcp_packets_in_flight(tp);
1328
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001329 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001330
Ilpo Järvinen90840de2007-12-31 04:48:41 -08001331 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001332
1333 /* From in_flight test above, we know that cwnd > in_flight. */
1334 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
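	/* Illustrative numbers: snd_cwnd = 20, in_flight = 12 and an
	 * mss_cache of 1460 leave cong_win = 8 * 1460 = 11680 bytes.
	 */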
1335
1336 limit = min(send_win, cong_win);
1337
David S. Millerba244fe2006-03-11 18:51:49 -08001338 /* If a full-sized TSO skb can be sent, do it. */
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001339 if (limit >= sk->sk_gso_max_size)
John Heffnerae8064a2006-10-18 20:36:48 -07001340 goto send_now;
David S. Millerba244fe2006-03-11 18:51:49 -08001341
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001342 if (sysctl_tcp_tso_win_divisor) {
1343 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1344
1345 /* If at least some fraction of a window is available,
1346 * just use it.
1347 */
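		/* Illustrative example: with a 64 KB window and a divisor
		 * of 3, chunk is ~21845 bytes, so we stop deferring as
		 * soon as limit (sendable right now) reaches that amount.
		 */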
1348 chunk /= sysctl_tcp_tso_win_divisor;
1349 if (limit >= chunk)
John Heffnerae8064a2006-10-18 20:36:48 -07001350 goto send_now;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001351 } else {
1352 /* Different approach, try not to defer past a single
1353 * ACK. Receiver should ACK every other full sized
1354 * frame, so if we have space for more than 3 frames
1355 * then send now.
1356 */
1357 if (limit > tcp_max_burst(tp) * tp->mss_cache)
John Heffnerae8064a2006-10-18 20:36:48 -07001358 goto send_now;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001359 }
1360
1361 /* Ok, it looks like it is advisable to defer. */
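	/* Stamp the deferral time: jiffies is stored shifted left one bit
	 * with the low bit set, so the field is non-zero even when jiffies
	 * is zero; the "two clock ticks" test above recovers it via
	 * tp->tso_deferred >> 1.
	 */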
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001362 tp->tso_deferred = 1 | (jiffies << 1);
John Heffnerae8064a2006-10-18 20:36:48 -07001363
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001364 return 1;
John Heffnerae8064a2006-10-18 20:36:48 -07001365
1366send_now:
1367 tp->tso_deferred = 0;
1368 return 0;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001369}
1370
John Heffner5d424d52006-03-20 17:53:41 -08001371/* Create a new MTU probe if we are ready.
1372 * Returns 0 if we should wait to probe (no cwnd available),
1373 * 1 if a probe was sent,
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001374 * -1 otherwise
1375 */
John Heffner5d424d52006-03-20 17:53:41 -08001376static int tcp_mtu_probe(struct sock *sk)
1377{
1378 struct tcp_sock *tp = tcp_sk(sk);
1379 struct inet_connection_sock *icsk = inet_csk(sk);
1380 struct sk_buff *skb, *nskb, *next;
1381 int len;
1382 int probe_size;
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001383 int size_needed;
John Heffner5d424d52006-03-20 17:53:41 -08001384 int copy;
1385 int mss_now;
1386
1387 /* Not currently probing/verifying,
1388 * not in recovery,
1389 * have enough cwnd, and
1390 * not SACKing (the variable headers throw things off) */
1391 if (!icsk->icsk_mtup.enabled ||
1392 icsk->icsk_mtup.probe_size ||
1393 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1394 tp->snd_cwnd < 11 ||
1395 tp->rx_opt.eff_sacks)
1396 return -1;
1397
1398 /* Very simple search strategy: just double the MSS. */
1399 mss_now = tcp_current_mss(sk, 0);
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001400 probe_size = 2 * tp->mss_cache;
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001401 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
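	/* Worked example (illustrative): with an mss_cache of 1460 and
	 * reordering of 3, probe_size is 2920 bytes and size_needed is
	 * 2920 + 4 * 1460 = 8760 bytes of queued data and window space.
	 */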
John Heffner5d424d52006-03-20 17:53:41 -08001402 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1403 /* TODO: set timer for probe_converge_event */
1404 return -1;
1405 }
1406
1407 /* Have enough data in the send queue to probe? */
Ilpo Järvinen7f9c33e2007-11-23 19:10:56 +08001408 if (tp->write_seq - tp->snd_nxt < size_needed)
John Heffner5d424d52006-03-20 17:53:41 -08001409 return -1;
1410
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001411 if (tp->snd_wnd < size_needed)
1412 return -1;
Ilpo Järvinen90840de2007-12-31 04:48:41 -08001413 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
Ilpo Järvinen91cc17c2007-11-23 19:08:16 +08001414 return 0;
John Heffner5d424d52006-03-20 17:53:41 -08001415
Ilpo Järvinend67c58e2007-12-02 00:48:01 +02001416 /* Do we need to wait to drain cwnd? With none in flight, don't stall */
1417 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1418 if (!tcp_packets_in_flight(tp))
John Heffner5d424d52006-03-20 17:53:41 -08001419 return -1;
1420 else
1421 return 0;
1422 }
1423
1424 /* We're allowed to probe. Build it now. */
1425 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1426 return -1;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001427 sk->sk_wmem_queued += nskb->truesize;
1428 sk_mem_charge(sk, nskb->truesize);
John Heffner5d424d52006-03-20 17:53:41 -08001429
David S. Millerfe067e82007-03-07 12:12:44 -08001430 skb = tcp_send_head(sk);
John Heffner5d424d52006-03-20 17:53:41 -08001431
1432 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1433 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1434 TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
1435 TCP_SKB_CB(nskb)->sacked = 0;
1436 nskb->csum = 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07001437 nskb->ip_summed = skb->ip_summed;
John Heffner5d424d52006-03-20 17:53:41 -08001438
Ilpo Järvinen50c48172007-12-02 00:48:00 +02001439 tcp_insert_write_queue_before(nskb, skb, sk);
1440
John Heffner5d424d52006-03-20 17:53:41 -08001441 len = 0;
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001442 tcp_for_write_queue_from_safe(skb, next, sk) {
John Heffner5d424d52006-03-20 17:53:41 -08001443 copy = min_t(int, skb->len, probe_size - len);
1444 if (nskb->ip_summed)
1445 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1446 else
1447 nskb->csum = skb_copy_and_csum_bits(skb, 0,
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001448 skb_put(nskb, copy),
1449 copy, nskb->csum);
John Heffner5d424d52006-03-20 17:53:41 -08001450
1451 if (skb->len <= copy) {
1452 /* We've eaten all the data from this skb.
1453 * Throw it away. */
1454 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
David S. Millerfe067e82007-03-07 12:12:44 -08001455 tcp_unlink_write_queue(skb, sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001456 sk_wmem_free_skb(sk, skb);
John Heffner5d424d52006-03-20 17:53:41 -08001457 } else {
1458 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001459 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
John Heffner5d424d52006-03-20 17:53:41 -08001460 if (!skb_shinfo(skb)->nr_frags) {
1461 skb_pull(skb, copy);
Patrick McHardy84fa7932006-08-29 16:44:56 -07001462 if (skb->ip_summed != CHECKSUM_PARTIAL)
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001463 skb->csum = csum_partial(skb->data,
1464 skb->len, 0);
John Heffner5d424d52006-03-20 17:53:41 -08001465 } else {
1466 __pskb_trim_head(skb, copy);
1467 tcp_set_skb_tso_segs(sk, skb, mss_now);
1468 }
1469 TCP_SKB_CB(skb)->seq += copy;
1470 }
1471
1472 len += copy;
Ilpo Järvinen234b6862007-12-02 00:48:02 +02001473
1474 if (len >= probe_size)
1475 break;
John Heffner5d424d52006-03-20 17:53:41 -08001476 }
1477 tcp_init_tso_segs(sk, nskb, nskb->len);
1478
1479 /* We're ready to send. If this fails, the probe will
1480 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1481 TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1482 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1483 /* Decrement cwnd here because we are sending
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001484 * effectively two packets. */
John Heffner5d424d52006-03-20 17:53:41 -08001485 tp->snd_cwnd--;
Ilpo Järvinen66f5fe62007-12-31 04:43:57 -08001486 tcp_event_new_data_sent(sk, nskb);
John Heffner5d424d52006-03-20 17:53:41 -08001487
1488 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
John Heffner0e7b1362006-03-20 21:32:58 -08001489 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1490 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
John Heffner5d424d52006-03-20 17:53:41 -08001491
1492 return 1;
1493 }
1494
1495 return -1;
1496}
1497
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498/* This routine writes packets to the network. It advances the
1499 * send_head. This happens as incoming acks open up the remote
1500 * window for us.
1501 *
1502 * Returns 1 if no segments are in flight and we have queued segments, but
1503 * cannot send anything now because of SWS or another problem.
1504 */
David S. Millera2e2a592005-07-05 15:19:23 -07001505static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506{
1507 struct tcp_sock *tp = tcp_sk(sk);
David S. Miller92df7b52005-07-05 15:19:06 -07001508 struct sk_buff *skb;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001509 unsigned int tso_segs, sent_pkts;
1510 int cwnd_quota;
John Heffner5d424d52006-03-20 17:53:41 -08001511 int result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
1513 /* If we are closed, the bytes will have to remain here.
1514 * In time closedown will finish; we'll empty the write queue and all
1515 * will be happy.
1516 */
David S. Miller92df7b52005-07-05 15:19:06 -07001517 if (unlikely(sk->sk_state == TCP_CLOSE))
1518 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
David S. Miller92df7b52005-07-05 15:19:06 -07001520 sent_pkts = 0;
John Heffner5d424d52006-03-20 17:53:41 -08001521
1522 /* Do MTU probing. */
1523 if ((result = tcp_mtu_probe(sk)) == 0) {
1524 return 0;
1525 } else if (result > 0) {
1526 sent_pkts = 1;
1527 }
1528
David S. Millerfe067e82007-03-07 12:12:44 -08001529 while ((skb = tcp_send_head(sk))) {
Herbert Xuc8ac3772005-08-16 20:43:40 -07001530 unsigned int limit;
1531
Herbert Xub68e9f82005-08-04 19:52:02 -07001532 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001533 BUG_ON(!tso_segs);
David S. Milleraa934662005-07-05 15:20:09 -07001534
Herbert Xub68e9f82005-08-04 19:52:02 -07001535 cwnd_quota = tcp_cwnd_test(tp, skb);
1536 if (!cwnd_quota)
1537 break;
1538
1539 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1540 break;
1541
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001542 if (tso_segs == 1) {
1543 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1544 (tcp_skb_is_last(sk, skb) ?
1545 nonagle : TCP_NAGLE_PUSH))))
1546 break;
1547 } else {
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001548 if (tcp_tso_should_defer(sk, skb))
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001549 break;
1550 }
David S. Milleraa934662005-07-05 15:20:09 -07001551
Herbert Xuc8ac3772005-08-16 20:43:40 -07001552 limit = mss_now;
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001553 if (tso_segs > 1)
1554 limit = tcp_mss_split_point(sk, skb, mss_now,
1555 cwnd_quota);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556
Herbert Xuc8ac3772005-08-16 20:43:40 -07001557 if (skb->len > limit &&
1558 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1559 break;
1560
David S. Miller92df7b52005-07-05 15:19:06 -07001561 TCP_SKB_CB(skb)->when = tcp_time_stamp;
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001562
David S. Millerdfb4b9d2005-12-06 16:24:52 -08001563 if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
David S. Miller92df7b52005-07-05 15:19:06 -07001564 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565
David S. Miller92df7b52005-07-05 15:19:06 -07001566 /* Advance the send_head. This one is sent out.
1567 * This call will increment packets_out.
1568 */
Ilpo Järvinen66f5fe62007-12-31 04:43:57 -08001569 tcp_event_new_data_sent(sk, skb);
David S. Miller92df7b52005-07-05 15:19:06 -07001570
1571 tcp_minshall_update(tp, mss_now, skb);
David S. Milleraa934662005-07-05 15:20:09 -07001572 sent_pkts++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 }
David S. Miller92df7b52005-07-05 15:19:06 -07001574
David S. Milleraa934662005-07-05 15:20:09 -07001575 if (likely(sent_pkts)) {
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001576 tcp_cwnd_validate(sk);
David S. Miller92df7b52005-07-05 15:19:06 -07001577 return 0;
1578 }
David S. Millerfe067e82007-03-07 12:12:44 -08001579 return !tp->packets_out && tcp_send_head(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580}
1581
David S. Millera762a982005-07-05 15:18:51 -07001582/* Push out any pending frames which were held back due to
1583 * TCP_CORK or attempt at coalescing tiny packets.
1584 * The socket must be locked by the caller.
1585 */
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001586void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1587 int nonagle)
David S. Millera762a982005-07-05 15:18:51 -07001588{
David S. Millerfe067e82007-03-07 12:12:44 -08001589 struct sk_buff *skb = tcp_send_head(sk);
David S. Millera762a982005-07-05 15:18:51 -07001590
1591 if (skb) {
David S. Miller55c97f32005-07-05 15:19:38 -07001592 if (tcp_write_xmit(sk, cur_mss, nonagle))
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001593 tcp_check_probe_timer(sk);
David S. Millera762a982005-07-05 15:18:51 -07001594 }
1595}
1596
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001597/* Send _single_ skb sitting at the send head. This function requires
1598 * true push pending frames to setup probe timer etc.
1599 */
1600void tcp_push_one(struct sock *sk, unsigned int mss_now)
1601{
David S. Millerfe067e82007-03-07 12:12:44 -08001602 struct sk_buff *skb = tcp_send_head(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001603 unsigned int tso_segs, cwnd_quota;
1604
1605 BUG_ON(!skb || skb->len < mss_now);
1606
David S. Miller846998a2005-08-04 19:52:01 -07001607 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001608 cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1609
1610 if (likely(cwnd_quota)) {
Herbert Xuc8ac3772005-08-16 20:43:40 -07001611 unsigned int limit;
1612
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001613 BUG_ON(!tso_segs);
1614
Herbert Xuc8ac3772005-08-16 20:43:40 -07001615 limit = mss_now;
Ilpo Järvinen0e3a4802007-12-24 21:33:45 -08001616 if (tso_segs > 1)
1617 limit = tcp_mss_split_point(sk, skb, mss_now,
1618 cwnd_quota);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001619
Herbert Xuc8ac3772005-08-16 20:43:40 -07001620 if (skb->len > limit &&
1621 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1622 return;
1623
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001624 /* Send it out now. */
1625 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1626
David S. Millerdfb4b9d2005-12-06 16:24:52 -08001627 if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
Ilpo Järvinen66f5fe62007-12-31 04:43:57 -08001628 tcp_event_new_data_sent(sk, skb);
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001629 tcp_cwnd_validate(sk);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07001630 return;
1631 }
1632 }
1633}
1634
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635/* This function returns the amount that we can raise the
1636 * usable window based on the following constraints
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001637 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 * 1. The window can never be shrunk once it is offered (RFC 793)
1639 * 2. We limit memory per socket
1640 *
1641 * RFC 1122:
1642 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1643 * RCV.NXT + RCV.WND fixed until:
1644 * RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
1645 *
1646 * i.e. don't raise the right edge of the window until you can raise
1647 * it at least MSS bytes.
1648 *
1649 * Unfortunately, the recommended algorithm breaks header prediction,
1650 * since header prediction assumes th->window stays fixed.
1651 *
1652 * Strictly speaking, keeping th->window fixed violates the receiver
1653 * side SWS prevention criteria. The problem is that under this rule
1654 * a stream of single byte packets will cause the right side of the
1655 * window to always advance by a single byte.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001656 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 * Of course, if the sender implements sender side SWS prevention
1658 * then this will not be a problem.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001659 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 * BSD seems to make the following compromise:
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001661 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 * If the free space is less than the 1/4 of the maximum
1663 * space available and the free space is less than 1/2 mss,
1664 * then set the window to 0.
1665 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1666 * Otherwise, just prevent the window from shrinking
1667 * and from being larger than the largest representable value.
1668 *
1669 * This prevents incremental opening of the window in the regime
1670 * where TCP is limited by the speed of the reader side taking
1671 * data out of the TCP receive queue. It does nothing about
1672 * those cases where the window is constrained on the sender side
1673 * because the pipeline is full.
1674 *
1675 * BSD also seems to "accidentally" limit itself to windows that are a
1676 * multiple of MSS, at least until the free space gets quite small.
1677 * This would appear to be a side effect of the mbuf implementation.
1678 * Combining these two algorithms results in the observed behavior
1679 * of having a fixed window size at almost all times.
1680 *
1681 * Below we obtain similar behavior by forcing the offered window to
1682 * a multiple of the mss when it is feasible to do so.
1683 *
1684 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1685 * Regular options like TIMESTAMP are taken into account.
1686 */
1687u32 __tcp_select_window(struct sock *sk)
1688{
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001689 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 struct tcp_sock *tp = tcp_sk(sk);
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001691 /* MSS for the peer's data. Previous versions used mss_clamp
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 * here. I don't know if the value based on our guesses
1693 * of peer's MSS is better for the performance. It's more correct
1694 * but may be worse for the performance because of rcv_mss
1695 * fluctuations. --SAW 1998/11/1
1696 */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001697 int mss = icsk->icsk_ack.rcv_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 int free_space = tcp_space(sk);
1699 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1700 int window;
1701
1702 if (mss > full_space)
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001703 mss = full_space;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
Eric Dumazetb92edbe2007-12-20 21:48:32 -08001705 if (free_space < (full_space >> 1)) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001706 icsk->icsk_ack.quick = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
1708 if (tcp_memory_pressure)
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001709 tp->rcv_ssthresh = min(tp->rcv_ssthresh,
1710 4U * tp->advmss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711
1712 if (free_space < mss)
1713 return 0;
1714 }
1715
1716 if (free_space > tp->rcv_ssthresh)
1717 free_space = tp->rcv_ssthresh;
1718
1719 /* Don't do rounding if we are using window scaling, since the
1720 * scaled window will not line up with the MSS boundary anyway.
1721 */
1722 window = tp->rcv_wnd;
1723 if (tp->rx_opt.rcv_wscale) {
1724 window = free_space;
1725
1726 /* Advertise enough space so that it won't get scaled away.
1727 * Important case: prevent a zero window announcement if
1728 * 1<<rcv_wscale > mss.
1729 */
1730 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1731 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1732 << tp->rx_opt.rcv_wscale);
1733 } else {
1734 /* Get the largest window that is a nice multiple of mss.
1735 * Window clamp already applied above.
1736 * If our current window offering is within 1 mss of the
1737 * free space we just keep it. This prevents the divide
1738 * and multiply from happening most of the time.
1739 * We also don't do any window rounding when the free space
1740 * is too small.
1741 */
1742 if (window <= free_space - mss || window > free_space)
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001743 window = (free_space / mss) * mss;
John Heffner84565072007-04-02 13:56:32 -07001744 else if (mss == full_space &&
Eric Dumazetb92edbe2007-12-20 21:48:32 -08001745 free_space > window + (full_space >> 1))
John Heffner84565072007-04-02 13:56:32 -07001746 window = free_space;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 }
1748
1749 return window;
1750}
1751
1752/* Attempt to collapse two adjacent SKB's during retransmission. */
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001753static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
1754 int mss_now)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755{
1756 struct tcp_sock *tp = tcp_sk(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001757 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001758 int skb_size, next_skb_size;
1759 u16 flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
1761 /* The first test we must make is that neither of these two
1762 * SKB's is still referenced by someone else.
1763 */
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001764 if (skb_cloned(skb) || skb_cloned(next_skb))
1765 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001767 skb_size = skb->len;
1768 next_skb_size = next_skb->len;
1769 flags = TCP_SKB_CB(skb)->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001771 /* Also punt if next skb has been SACK'd. */
1772 if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
1773 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001775 /* Next skb is out of window. */
1776 if (after(TCP_SKB_CB(next_skb)->end_seq, tcp_wnd_end(tp)))
1777 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001779 /* Punt if not enough space exists in the first SKB for
1780 * the data in the second, or the total combined payload
1781 * would exceed the MSS.
1782 */
1783 if ((next_skb_size > skb_tailroom(skb)) ||
1784 ((skb_size + next_skb_size) > mss_now))
1785 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001787 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
Ilpo Järvinena6963a62007-09-25 22:44:14 -07001788
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001789 tcp_highest_sack_combine(sk, next_skb, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001791 /* Ok. We will be able to collapse the packet. */
1792 tcp_unlink_write_queue(next_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001794 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
1795 next_skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001797 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
1798 skb->ip_summed = CHECKSUM_PARTIAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001800 if (skb->ip_summed != CHECKSUM_PARTIAL)
1801 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001803 /* Update sequence range on original skb. */
1804 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001806 /* Merge over control information. */
1807 flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
1808 TCP_SKB_CB(skb)->flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001810 /* All done, get rid of second SKB and account for it so
1811 * packet counting does not break.
1812 */
1813 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
1814 if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_RETRANS)
1815 tp->retrans_out -= tcp_skb_pcount(next_skb);
1816 if (TCP_SKB_CB(next_skb)->sacked & TCPCB_LOST)
1817 tp->lost_out -= tcp_skb_pcount(next_skb);
1818 /* Reno case is special. Sigh... */
1819 if (tcp_is_reno(tp) && tp->sacked_out)
1820 tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
Ilpo Järvinenb7689202007-09-20 11:37:19 -07001821
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001822 tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
1823 tp->packets_out -= tcp_skb_pcount(next_skb);
Ilpo Järvinenb7689202007-09-20 11:37:19 -07001824
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001825 /* changed transmit queue under us so clear hints */
Ilpo Järvinen64edc272008-09-20 21:18:32 -07001826 tcp_clear_all_retrans_hints(tp);
Ilpo Järvinen058dc332007-12-31 04:51:11 -08001827
1828 sk_wmem_free_skb(sk, next_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829}
1830
1831/* Do a simple retransmit without using the backoff mechanisms in
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001832 * tcp_timer. This is used for path MTU discovery.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 * The socket is already locked here.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001834 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835void tcp_simple_retransmit(struct sock *sk)
1836{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001837 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 struct tcp_sock *tp = tcp_sk(sk);
1839 struct sk_buff *skb;
1840 unsigned int mss = tcp_current_mss(sk, 0);
Ilpo Järvinen006f5822008-09-20 21:20:20 -07001841 u32 prior_lost = tp->lost_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
David S. Millerfe067e82007-03-07 12:12:44 -08001843 tcp_for_write_queue(skb, sk) {
1844 if (skb == tcp_send_head(sk))
1845 break;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001846 if (skb->len > mss &&
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001847 !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1848 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1850 tp->retrans_out -= tcp_skb_pcount(skb);
1851 }
Ilpo Järvinen006f5822008-09-20 21:20:20 -07001852 tcp_skb_mark_lost_uncond_verify(tp, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 }
1854 }
1855
Ilpo Järvinen5af4ec22007-09-20 11:30:48 -07001856 tcp_clear_all_retrans_hints(tp);
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08001857
Ilpo Järvinen006f5822008-09-20 21:20:20 -07001858 if (prior_lost == tp->lost_out)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 return;
1860
Ilpo Järvinen882beba2008-04-07 22:33:07 -07001861 if (tcp_is_reno(tp))
1862 tcp_limit_reno_sacked(tp);
1863
Ilpo Järvinen005903b2007-08-09 14:44:16 +03001864 tcp_verify_left_out(tp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001866 /* Don't muck with the congestion window here.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 * Reason is that we do not increase amount of _data_
1868 * in network, but units changed and effective
1869 * cwnd/ssthresh really reduced now.
1870 */
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001871 if (icsk->icsk_ca_state != TCP_CA_Loss) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 tp->high_seq = tp->snd_nxt;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001873 tp->snd_ssthresh = tcp_current_ssthresh(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 tp->prior_ssthresh = 0;
1875 tp->undo_marker = 0;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001876 tcp_set_ca_state(sk, TCP_CA_Loss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 }
1878 tcp_xmit_retransmit_queue(sk);
1879}
1880
1881/* This retransmits one SKB. Policy decisions and retransmit queue
1882 * state updates are done by the caller. Returns non-zero if an
1883 * error occurred which prevented the send.
1884 */
1885int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1886{
1887 struct tcp_sock *tp = tcp_sk(sk);
John Heffner5d424d52006-03-20 17:53:41 -08001888 struct inet_connection_sock *icsk = inet_csk(sk);
Sridhar Samudrala7d227cd22008-05-21 16:42:20 -07001889 unsigned int cur_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 int err;
1891
John Heffner5d424d52006-03-20 17:53:41 -08001892 /* Inconclusive MTU probe */
1893 if (icsk->icsk_mtup.probe_size) {
1894 icsk->icsk_mtup.probe_size = 0;
1895 }
1896
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 /* Do not send more than we queued. 1/4 is reserved for possible
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001898 * copying overhead: fragmentation, tunneling, mangling etc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 */
1900 if (atomic_read(&sk->sk_wmem_alloc) >
1901 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
1902 return -EAGAIN;
1903
1904 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
1905 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1906 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
1908 return -ENOMEM;
1909 }
1910
Sridhar Samudrala7d227cd22008-05-21 16:42:20 -07001911 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1912 return -EHOSTUNREACH; /* Routing failure or similar. */
1913
1914 cur_mss = tcp_current_mss(sk, 0);
1915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 /* If the receiver has shrunk its window, and skb is out of the
1917 * new window, do not retransmit it. The exception is the
1918 * case when the window is shrunk to zero; in that case
1919 * our retransmit serves as a zero window probe.
1920 */
Ilpo Järvinen90840de2007-12-31 04:48:41 -08001921 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 && TCP_SKB_CB(skb)->seq != tp->snd_una)
1923 return -EAGAIN;
1924
1925 if (skb->len > cur_mss) {
David S. Miller846998a2005-08-04 19:52:01 -07001926 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 return -ENOMEM; /* We'll try again later. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 }
1929
1930 /* Collapse two adjacent packets if worthwhile and we can. */
Stephen Hemminger2de979b2007-03-08 20:45:19 -08001931 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1932 (skb->len < (cur_mss >> 1)) &&
1933 (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1934 (!tcp_skb_is_last(sk, skb)) &&
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001935 (skb_shinfo(skb)->nr_frags == 0 &&
1936 skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
1937 (tcp_skb_pcount(skb) == 1 &&
1938 tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
Stephen Hemminger2de979b2007-03-08 20:45:19 -08001939 (sysctl_tcp_retrans_collapse != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 tcp_retrans_try_collapse(sk, skb, cur_mss);
1941
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 /* Some Solaris stacks overoptimize and ignore the FIN on a
1943 * retransmit when old data is attached. So strip it off
1944 * since it is cheap to do so and saves bytes on the network.
1945 */
Stephen Hemminger2de979b2007-03-08 20:45:19 -08001946 if (skb->len > 0 &&
1947 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1948 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 if (!pskb_trim(skb, 0)) {
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08001950 /* Reuse, even though it does some unnecessary work */
1951 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
1952 TCP_SKB_CB(skb)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 skb->ip_summed = CHECKSUM_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 }
1955 }
1956
1957 /* Make a copy, if the first transmission SKB clone we made
1958 * is still in somebody's hands, else make a clone.
1959 */
1960 TCP_SKB_CB(skb)->when = tcp_time_stamp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
David S. Millerdfb4b9d2005-12-06 16:24:52 -08001962 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 if (err == 0) {
1965 /* Update global TCP statistics. */
Pavel Emelyanov81cc8a72008-07-16 20:22:04 -07001966 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968 tp->total_retrans++;
1969
1970#if FASTRETRANS_DEBUG > 0
Ilpo Järvinen056834d2007-12-31 14:57:14 -08001971 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 if (net_ratelimit())
1973 printk(KERN_DEBUG "retrans_out leaked.\n");
1974 }
1975#endif
Ilpo Järvinenb08d6cb2007-10-11 17:36:13 -07001976 if (!tp->retrans_out)
1977 tp->lost_retrans_low = tp->snd_nxt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
1979 tp->retrans_out += tcp_skb_pcount(skb);
1980
1981 /* Save stamp of the first retransmit. */
1982 if (!tp->retrans_stamp)
1983 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1984
1985 tp->undo_retrans++;
1986
1987 /* snd_nxt is stored to detect loss of retransmitted segment,
1988 * see tcp_input.c tcp_sacktag_write_queue().
1989 */
1990 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1991 }
1992 return err;
1993}
1994
Ilpo Järvinenb5afe7b2008-09-20 21:21:54 -07001995static int tcp_can_forward_retransmit(struct sock *sk)
1996{
1997 const struct inet_connection_sock *icsk = inet_csk(sk);
1998 struct tcp_sock *tp = tcp_sk(sk);
1999
2000 /* Forward retransmissions are possible only during Recovery. */
2001 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2002 return 0;
2003
2004 /* No forward retransmissions in Reno are possible. */
2005 if (tcp_is_reno(tp))
2006 return 0;
2007
2008 /* Yeah, we have to make difficult choice between forward transmission
2009 * and retransmission... Both ways have their merits...
2010 *
2011 * For now we do not retransmit anything, while we have some new
2012 * segments to send. In the other cases, follow rule 3 for
2013 * NextSeg() specified in RFC3517.
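 * (NextSeg() rule 3: when rules (1) and (2) yield nothing, send one
 * segment of previously unsent data if the receiver's window allows.)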
2014 */
2015
2016 if (tcp_may_send_now(sk))
2017 return 0;
2018
2019 return 1;
2020}
2021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022/* This gets called after a retransmit timeout, and the initially
2023 * retransmitted data is acknowledged. It tries to continue
2024 * resending the rest of the retransmit queue, until either
2025 * we've sent it all or the congestion window limit is reached.
2026 * If doing SACK, the first ACK which comes back for a timeout
2027 * based retransmit packet might feed us FACK information again.
2028 * If so, we use it to avoid unnecessary retransmissions.
2029 */
2030void tcp_xmit_retransmit_queue(struct sock *sk)
2031{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002032 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 struct tcp_sock *tp = tcp_sk(sk);
2034 struct sk_buff *skb;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002035 struct sk_buff *hole = NULL;
Ilpo Järvinen61eb55f2008-09-20 21:22:59 -07002036 int mib_idx;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002037 int fwd_rexmitting = 0;
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08002038
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002039 if (!tp->lost_out)
2040 tp->retransmit_high = tp->snd_una;
2041
Ilpo Järvinen006f5822008-09-20 21:20:20 -07002042 if (tp->retransmit_skb_hint)
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08002043 skb = tp->retransmit_skb_hint;
Ilpo Järvinen006f5822008-09-20 21:20:20 -07002044 else
David S. Millerfe067e82007-03-07 12:12:44 -08002045 skb = tcp_write_queue_head(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
2047 /* First pass: retransmit lost packets. */
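	/* Besides LOST skbs this loop also performs forward retransmissions:
	 * once past retransmit_high it backtracks to the first clean skb
	 * ("hole") seen, if any, and continues up to the highest SACKed seq.
	 */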
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002048 tcp_for_write_queue_from(skb, sk) {
2049 __u8 sacked = TCP_SKB_CB(skb)->sacked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002051 if (skb == tcp_send_head(sk))
2052 break;
2053 /* we could do better than to assign each time */
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002054 if (hole == NULL)
2055 tp->retransmit_skb_hint = skb;
Stephen Hemminger6a438bb2005-11-10 17:14:59 -08002056
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002057 /* Assume this retransmit will generate
2058 * only one packet for congestion window
2059 * calculation purposes. This works because
2060 * tcp_retransmit_skb() will chop up the
2061 * packet to be MSS sized and all the
2062 * packet counting works out.
2063 */
2064 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2065 return;
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002066
2067 if (fwd_rexmitting) {
2068begin_fwd:
2069 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2070 break;
2071 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2072
2073 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2074 if (!tcp_can_forward_retransmit(sk))
2075 break;
2076 /* Backtrack if necessary to non-L'ed skb */
2077 if (hole != NULL) {
2078 skb = hole;
2079 hole = NULL;
2080 }
2081 fwd_rexmitting = 1;
2082 goto begin_fwd;
2083
2084 } else if (!(sacked & TCPCB_LOST)) {
2085 if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS))
2086 hole = skb;
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002087 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
Ilpo Järvinen0e1c54c2008-09-20 21:24:21 -07002089 } else {
2090 if (icsk->icsk_ca_state != TCP_CA_Loss)
2091 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2092 else
2093 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2094 }
2095
2096 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002097 continue;
Pavel Emelyanov40b215e2008-07-03 01:05:41 -07002098
Ilpo Järvinenf0ceb0e2008-09-20 21:24:49 -07002099 if (tcp_retransmit_skb(sk, skb))
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002100 return;
Ilpo Järvinen08ebd172008-09-20 21:23:49 -07002101 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2102
2103 if (skb == tcp_write_queue_head(sk))
2104 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2105 inet_csk(sk)->icsk_rto,
2106 TCP_RTO_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108}
2109
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110/* Send a FIN. The caller locks the socket for us. This cannot be
2111 * allowed to fail queueing a FIN frame under any circumstances.
2112 */
2113void tcp_send_fin(struct sock *sk)
2114{
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002115 struct tcp_sock *tp = tcp_sk(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08002116 struct sk_buff *skb = tcp_write_queue_tail(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 int mss_now;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 /* Optimization, tack on the FIN if we have a queue of
2120 * unsent frames. But be careful about outgoing SACKS
2121 * and IP options.
2122 */
2123 mss_now = tcp_current_mss(sk, 1);
2124
David S. Millerfe067e82007-03-07 12:12:44 -08002125 if (tcp_send_head(sk) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
2127 TCP_SKB_CB(skb)->end_seq++;
2128 tp->write_seq++;
2129 } else {
2130 /* Socket is locked, keep trying until memory is available. */
2131 for (;;) {
David S. Millerd179cd12005-08-17 14:57:30 -07002132 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 if (skb)
2134 break;
2135 yield();
2136 }
2137
2138 /* Reserve space for headers and prepare control bits. */
2139 skb_reserve(skb, MAX_TCP_HEADER);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002141 tcp_init_nondata_skb(skb, tp->write_seq,
2142 TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 tcp_queue_skb(sk, skb);
2144 }
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07002145 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146}
2147
2148/* We get here when a process closes a file descriptor (either due to
2149 * an explicit close() or as a byproduct of exit()'ing) and there
2150 * was unread data in the receive queue. This behavior is recommended
Gerrit Renker65bb7232007-04-28 21:21:46 -07002151 * by RFC 2525, section 2.17. -DaveM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 */
Al Virodd0fc662005-10-07 07:46:04 +01002153void tcp_send_active_reset(struct sock *sk, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 struct sk_buff *skb;
2156
2157 /* NOTE: No TCP options attached and we never retransmit this. */
2158 skb = alloc_skb(MAX_TCP_HEADER, priority);
2159 if (!skb) {
Pavel Emelyanov4e673442008-07-16 20:30:14 -07002160 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 return;
2162 }
2163
2164 /* Reserve space for headers and prepare control bits. */
2165 skb_reserve(skb, MAX_TCP_HEADER);
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002166 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2167 TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 /* Send it off. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 TCP_SKB_CB(skb)->when = tcp_time_stamp;
David S. Millerdfb4b9d2005-12-06 16:24:52 -08002170 if (tcp_transmit_skb(sk, skb, 0, priority))
Pavel Emelyanov4e673442008-07-16 20:30:14 -07002171 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
Sridhar Samudrala26af65c2008-06-04 15:19:35 -07002172
Pavel Emelyanov81cc8a72008-07-16 20:22:04 -07002173 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174}
2175
2176/* WARNING: This routine must only be called when we have already sent
2177 * a SYN packet that crossed the incoming SYN that caused this routine
2178 * to get called. If this assumption fails then the initial rcv_wnd
2179 * and rcv_wscale values will not be correct.
2180 */
2181int tcp_send_synack(struct sock *sk)
2182{
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002183 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
David S. Millerfe067e82007-03-07 12:12:44 -08002185 skb = tcp_write_queue_head(sk);
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002186 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2188 return -EFAULT;
2189 }
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002190 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 if (skb_cloned(skb)) {
2192 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2193 if (nskb == NULL)
2194 return -ENOMEM;
David S. Millerfe067e82007-03-07 12:12:44 -08002195 tcp_unlink_write_queue(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 skb_header_release(nskb);
David S. Millerfe067e82007-03-07 12:12:44 -08002197 __tcp_add_write_queue_head(sk, nskb);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002198 sk_wmem_free_skb(sk, skb);
2199 sk->sk_wmem_queued += nskb->truesize;
2200 sk_mem_charge(sk, nskb->truesize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 skb = nskb;
2202 }
2203
2204 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
2205 TCP_ECN_send_synack(tcp_sk(sk), skb);
2206 }
2207 TCP_SKB_CB(skb)->when = tcp_time_stamp;
David S. Millerdfb4b9d2005-12-06 16:24:52 -08002208 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209}
2210
2211/*
2212 * Prepare a SYN-ACK.
2213 */
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002214struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2215 struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002217 struct inet_request_sock *ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 struct tcp_sock *tp = tcp_sk(sk);
2219 struct tcphdr *th;
2220 int tcp_header_size;
Adam Langley33ad7982008-07-19 00:04:31 -07002221 struct tcp_out_options opts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 struct sk_buff *skb;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002223 struct tcp_md5sig_key *md5;
2224 __u8 *md5_hash_location;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225
2226 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2227 if (skb == NULL)
2228 return NULL;
2229
2230 /* Reserve space for headers. */
2231 skb_reserve(skb, MAX_TCP_HEADER);
2232
2233 skb->dst = dst_clone(dst);
2234
Adam Langley33ad7982008-07-19 00:04:31 -07002235 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2236 __u8 rcv_wscale;
2237 /* Set this up on the first call only */
2238 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2239 /* tcp_full_space because it is guaranteed to be the first packet */
2240 tcp_select_initial_window(tcp_full_space(sk),
2241 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2242 &req->rcv_wnd,
2243 &req->window_clamp,
2244 ireq->wscale_ok,
2245 &rcv_wscale);
2246 ireq->rcv_wscale = rcv_wscale;
2247 }
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002248
Adam Langley33ad7982008-07-19 00:04:31 -07002249 memset(&opts, 0, sizeof(opts));
2250 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2251 tcp_header_size = tcp_synack_options(sk, req,
2252 dst_metric(dst, RTAX_ADVMSS),
2253 skb, &opts, &md5) +
2254 sizeof(struct tcphdr);
2255
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002256 skb_push(skb, tcp_header_size);
2257 skb_reset_transport_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002259 th = tcp_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 memset(th, 0, sizeof(struct tcphdr));
2261 th->syn = 1;
2262 th->ack = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 TCP_ECN_make_synack(req, th);
2264 th->source = inet_sk(sk)->sport;
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002265 th->dest = ireq->rmt_port;
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002266 /* Setting of flags is superfluous here for callers (and ECE is
2267 * not even correctly set)
2268 */
2269 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2270 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 th->seq = htonl(TCP_SKB_CB(skb)->seq);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002272 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
2274 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
Ilpo Järvinen600ff0c2007-02-13 12:42:11 -08002275 th->window = htons(min(req->rcv_wnd, 65535U));
Florian Westphal4dfc2812008-04-10 03:12:40 -07002276#ifdef CONFIG_SYN_COOKIES
2277 if (unlikely(req->cookie_ts))
2278 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2279 else
2280#endif
Adam Langley33ad7982008-07-19 00:04:31 -07002281 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 th->doff = (tcp_header_size >> 2);
Pavel Emelyanov81cc8a72008-07-16 20:22:04 -07002283 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002284
2285#ifdef CONFIG_TCP_MD5SIG
2286 /* Okay, we have all we need - do the md5 hash if needed */
2287 if (md5) {
2288 tp->af_specific->calc_md5_hash(md5_hash_location,
Adam Langley49a72df2008-07-19 00:01:42 -07002289 md5, NULL, req, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002290 }
2291#endif
2292
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 return skb;
2294}
2295
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002296/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 * Do all connect socket setups that can be done AF-independently.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002298 */
Stephen Hemminger40efc6f2006-01-03 16:03:49 -08002299static void tcp_connect_init(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300{
2301 struct dst_entry *dst = __sk_dst_get(sk);
2302 struct tcp_sock *tp = tcp_sk(sk);
2303 __u8 rcv_wscale;
2304
2305 /* We'll fix this up when we get a response from the other end.
2306 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2307 */
2308 tp->tcp_header_len = sizeof(struct tcphdr) +
2309 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2310
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002311#ifdef CONFIG_TCP_MD5SIG
2312 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2313 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2314#endif
2315
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 /* If the user gave us TCP_MAXSEG, record it so we can clamp the MSS */
2317 if (tp->rx_opt.user_mss)
2318 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2319 tp->max_window = 0;
John Heffner5d424d52006-03-20 17:53:41 -08002320 tcp_mtup_init(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 tcp_sync_mss(sk, dst_mtu(dst));
2322
2323 if (!tp->window_clamp)
2324 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2325 tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2326 tcp_initialize_rcv_mss(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327
2328 tcp_select_initial_window(tcp_full_space(sk),
2329 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2330 &tp->rcv_wnd,
2331 &tp->window_clamp,
2332 sysctl_tcp_window_scaling,
2333 &rcv_wscale);
2334
2335 tp->rx_opt.rcv_wscale = rcv_wscale;
2336 tp->rcv_ssthresh = tp->rcv_wnd;
2337
2338 sk->sk_err = 0;
2339 sock_reset_flag(sk, SOCK_DONE);
2340 tp->snd_wnd = 0;
2341 tcp_init_wl(tp, tp->write_seq, 0);
2342 tp->snd_una = tp->write_seq;
2343 tp->snd_sml = tp->write_seq;
2344 tp->rcv_nxt = 0;
2345 tp->rcv_wup = 0;
2346 tp->copied_seq = 0;
2347
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002348 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2349 inet_csk(sk)->icsk_retransmits = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 tcp_clear_retrans(tp);
2351}
2352
2353/*
2354 * Build a SYN and send it off.
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002355 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356int tcp_connect(struct sock *sk)
2357{
2358 struct tcp_sock *tp = tcp_sk(sk);
2359 struct sk_buff *buff;
2360
2361 tcp_connect_init(sk);
2362
David S. Millerd179cd12005-08-17 14:57:30 -07002363 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 if (unlikely(buff == NULL))
2365 return -ENOBUFS;
2366
2367 /* Reserve space for headers. */
2368 skb_reserve(buff, MAX_TCP_HEADER);
2369
Wei Yongjunbd37a082006-08-07 21:04:15 -07002370 tp->snd_nxt = tp->write_seq;
Ilpo Järvinene870a8e2008-01-03 20:39:01 -08002371 tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
2372 TCP_ECN_send_syn(sk, buff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373
2374 /* Send it off. */
2375 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2376 tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2377 skb_header_release(buff);
David S. Millerfe067e82007-03-07 12:12:44 -08002378 __tcp_add_write_queue_tail(sk, buff);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002379 sk->sk_wmem_queued += buff->truesize;
2380 sk_mem_charge(sk, buff->truesize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 tp->packets_out += tcp_skb_pcount(buff);
David S. Millerdfb4b9d2005-12-06 16:24:52 -08002382 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
Wei Yongjunbd37a082006-08-07 21:04:15 -07002383
2384 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2385 * in order to make this packet get counted in tcpOutSegs.
2386 */
2387 tp->snd_nxt = tp->write_seq;
2388 tp->pushed_seq = tp->write_seq;
Pavel Emelyanov81cc8a72008-07-16 20:22:04 -07002389 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
2391 /* Timer for repeating the SYN until an answer. */
Arnaldo Carvalho de Melo3f421ba2005-08-09 20:11:08 -07002392 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2393 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 return 0;
2395}
2396
2397/* Send out a delayed ack; the caller does the policy checking
2398 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
2399 * for details.
2400 */
2401void tcp_send_delayed_ack(struct sock *sk)
2402{
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002403 struct inet_connection_sock *icsk = inet_csk(sk);
2404 int ato = icsk->icsk_ack.ato;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 unsigned long timeout;
2406
2407 if (ato > TCP_DELACK_MIN) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002408 const struct tcp_sock *tp = tcp_sk(sk);
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002409 int max_ato = HZ / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002411 if (icsk->icsk_ack.pingpong ||
2412 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 max_ato = TCP_DELACK_MAX;
2414
2415 /* Slow path, intersegment interval is "high". */
2416
2417 /* If some rtt estimate is known, use it to bound delayed ack.
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002418 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 * directly.
2420 */
2421 if (tp->srtt) {
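			/* srtt is kept left-shifted by 3, so srtt >> 3 is the
			 * smoothed RTT in jiffies; e.g. a smoothed RTT of 40 ms
			 * caps the delayed ACK timeout at roughly 40 ms.
			 */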
Ilpo Järvinen056834d2007-12-31 14:57:14 -08002422 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423
2424 if (rtt < max_ato)
2425 max_ato = rtt;
2426 }
2427
2428 ato = min(ato, max_ato);
2429 }
2430
2431 /* Stay within the limit we were given */
2432 timeout = jiffies + ato;
2433
	/* Use the new timeout only if there wasn't an older one already. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 */
		if (icsk->icsk_ack.blocked ||
		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
			tcp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
	struct sk_buff *buff;

	/* If we have been reset, we may not send again. */
	if (sk->sk_state == TCP_CLOSE)
		return;

	/* We are not putting this on the write queue, so
	 * tcp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (buff == NULL) {
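		/* Allocation failed: fall back to the delayed-ACK timer
		 * so the ACK is retried later instead of being lost.
		 */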
		inet_csk_schedule_ack(sk);
		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  TCP_DELACK_MAX, TCP_RTO_MAX);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(buff, MAX_TCP_HEADER);
	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);

	/* Send it off, this clears delayed acks for us. */
	TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
}

/* This routine sends a packet with an out of date sequence
 * number. It assumes the other end will try to ack it.
 *
 * Question: what should we do in urgent mode?
 * 4.4BSD forces sending a single byte of data. We cannot send
 * out of window data, because we have SND.NXT==SND.MAX...
 *
 * Current solution: send TWO zero-length segments in urgent mode:
 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another
 * out-of-date with SND.UNA-1 to probe the window.
 */
static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* We don't queue it, tcp_transmit_skb() sets ownership. */
	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (skb == NULL)
		return -1;

	/* Reserve space for headers and set control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	/* Use a previous sequence.  This should cause the other
	 * end to send an ack.  Don't queue or clone SKB, just
	 * send it.
	 */
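	/* For an urgent-mode probe, the sequence is SND.UNA itself
	 * (delivering the urgent pointer); otherwise it is SND.UNA-1,
	 * which is already acked and so forces a duplicate ACK carrying
	 * the current window.
	 */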
	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}

int tcp_write_wakeup(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (sk->sk_state == TCP_CLOSE)
		return -1;

	if ((skb = tcp_send_head(sk)) != NULL &&
	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
		int err;
		unsigned int mss = tcp_current_mss(sk, 0);
		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

		/* We are probing the opening of a window
		 * but the window size is != 0; this must have been
		 * the result of SWS avoidance (sender side).
		 */
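		/* If only part of the skb fits in the advertised window,
		 * or the skb exceeds the current MSS, carve off a
		 * window-sized fragment and send that as the probe.
		 */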
		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
		    skb->len > mss) {
			seg_size = min(seg_size, mss);
			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
			if (tcp_fragment(sk, skb, seg_size, mss))
				return -1;
		} else if (!tcp_skb_pcount(skb))
			tcp_set_skb_tso_segs(sk, skb, mss);

		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
		TCP_SKB_CB(skb)->when = tcp_time_stamp;
		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
		if (!err)
			tcp_event_new_data_sent(sk, skb);
		return err;
	} else {
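		/* Nothing sendable within the window: fall back to the
		 * zero-length probes described above tcp_xmit_probe_skb(),
		 * sending both variants while in urgent mode.
		 */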
		if (tp->urg_mode &&
		    between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
			tcp_xmit_probe_skb(sk, 1);
		return tcp_xmit_probe_skb(sk, 0);
	}
}

/* A window probe timeout has occurred. If the window is not closed,
 * send a partial packet, else a zero-window probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err;

	err = tcp_write_wakeup(sk);

	if (tp->packets_out || !tcp_send_head(sk)) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		return;
	}

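	/* A zero or negative return means the probe went out (or failed
	 * outright): apply exponential backoff before rearming the probe
	 * timer. A positive return indicates a local-congestion drop,
	 * handled in the else branch below.
	 */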
	if (err <= 0) {
		if (icsk->icsk_backoff < sysctl_tcp_retries2)
			icsk->icsk_backoff++;
		icsk->icsk_probes_out++;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	} else {
		/* If the packet was not sent due to local congestion,
		 * do not back off and do not remember icsk_probes_out.
		 * Let local senders fight for local resources.
		 *
		 * Still use the accumulated backoff, though.
		 */
		if (!icsk->icsk_probes_out)
			icsk->icsk_probes_out = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
	}
}

EXPORT_SYMBOL(tcp_select_initial_window);
EXPORT_SYMBOL(tcp_connect);
EXPORT_SYMBOL(tcp_make_synack);
EXPORT_SYMBOL(tcp_simple_retransmit);
EXPORT_SYMBOL(tcp_sync_mss);
EXPORT_SYMBOL(tcp_mtup_init);