/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly :	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
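
/*
 * Worked example (informational, follows from the table above): an active
 * close walks ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE,
 * while the passive side walks ESTABLISHED -> CLOSE_WAIT -> LAST_ACK ->
 * CLOSE. On a simultaneous close, FIN_WAIT1 goes through CLOSING (and then
 * TIME_WAIT) instead of FIN_WAIT2.
 */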

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All of the sk_stream_mem_schedule() accounting is of this nature:
 * accounting is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic; correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making it impossible to poll()
	 * on write() in state CLOSE_WAIT. One solution is evident --- to
	 * set POLLHUP if and only if shutdown has been made in both
	 * directions. Actually, it is interesting to look at how Solaris
	 * and DUX solve this dilemma. I would prefer, if POLLHUP were
	 * maskable, then we could set it on SND_SHUTDOWN. BTW examples
	 * given in Stevens' books assume exactly this behaviour, it
	 * explains why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If the read of tp below
		 * escapes above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

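/*
 * Illustrative sketch (not part of this file): how the mask computed by
 * tcp_poll() looks from user space. The descriptor fd is hypothetical:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			handle_readable(fd);	(read() will not block)
 *		if (pfd.revents & POLLOUT)
 *			handle_writable(fd);	(wspace >= min_wspace above)
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			handle_hangup(fd);	(sk_err set, or both
 *						 directions shut down)
 *	}
 */
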
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -= tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}

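/*
 * Illustrative sketch (not part of this file): the three ioctls above as
 * seen from user space, on a hypothetical connected TCP socket fd:
 *
 *	int inq, outq, atmark;
 *
 *	ioctl(fd, SIOCINQ, &inq);	unread bytes (rcv_nxt - copied_seq)
 *	ioctl(fd, SIOCOUTQ, &outq);	bytes written but not yet acked
 *					(write_seq - snd_una)
 *	ioctl(fd, SIOCATMARK, &atmark);	nonzero when the read pointer
 *					sits at the urgent mark
 */
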
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

/* Force a push once unpushed data exceeds half of the largest window the
 * peer has ever advertised, so the receiver is never left holding a large
 * amount of unannounced data.
 */
static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_charge_skb(sk, skb);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

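/*
 * Illustrative sketch (not kernel code): MSG_MORE defers the PSH marking
 * above and corks the queue, so a user-space writer can stitch small
 * pieces into full-size segments; fd and the buffers are hypothetical:
 *
 *	send(fd, hdr, hdr_len, MSG_MORE);	queued, not pushed
 *	send(fd, body, body_len, 0);		pushed out as full frames
 */
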
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}

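/*
 * Illustrative sketch (not part of this file): tcp_sendpage() is the
 * zero-copy path that a file-to-socket sendfile() lands on when the route
 * supports scatter-gather plus checksum offload; otherwise it falls back
 * to sock_no_sendpage(), which copies. From user space (fd values are
 * hypothetical):
 *
 *	off_t off = 0;
 *	ssize_t sent = sendfile(sock_fd, file_fd, &off, count);
 */
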
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate an skb fitting a single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					    !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

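/*
 * Illustrative sketch (not part of this file): reading the single byte of
 * urgent data handled above, from user space on a hypothetical fd:
 *
 *	char oob;
 *	ssize_t n = recv(fd, &oob, 1, MSG_OOB);
 *
 * n == 1 on success. EINVAL means the byte was already consumed or
 * SO_OOBINLINE is set; EAGAIN means no urgent byte has arrived yet -
 * the call never blocks, per the comment above.
 */
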
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}

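/*
 * Minimal sketch of the recv_actor contract (illustrative, not an in-tree
 * user; the name count_actor is hypothetical). The actor reports how many
 * of the offered bytes it consumed:
 *
 *	static int count_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			       unsigned int offset, size_t len)
 *	{
 *		desc->written += len;
 *		return len;
 *	}
 *
 * A caller, with the socket already locked, would then do:
 *
 *	read_descriptor_t desc = { .count = 1 };
 *	int copied = tcp_read_sock(sk, &desc, count_actor);
 */
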
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
		preempt_enable_no_resched();
		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
	} else
		preempt_enable_no_resched();
#endif

	do {
		struct sk_buff *skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it right now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		struct sk_buff *skb;
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static const unsigned char new_state[16] = {
  /* current state:	   new state:		action:		*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}

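/*
 * Illustrative sketch (not part of this file): a user-space half-close
 * that arrives here with how == SEND_SHUTDOWN, on a hypothetical fd:
 *
 *	shutdown(fd, SHUT_WR);			we send a FIN, reads still work
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		drain until the peer closes too
 *	close(fd);
 */
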
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545void tcp_close(struct sock *sk, long timeout)
1546{
1547 struct sk_buff *skb;
1548 int data_was_unread = 0;
Herbert Xu75c2d9072006-05-03 23:31:35 -07001549 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
1551 lock_sock(sk);
1552 sk->sk_shutdown = SHUTDOWN_MASK;
1553
1554 if (sk->sk_state == TCP_LISTEN) {
1555 tcp_set_state(sk, TCP_CLOSE);
1556
1557 /* Special case. */
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001558 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
1560 goto adjudge_to_death;
1561 }
1562
1563 /* We need to flush the recv. buffs. We do this only on the
1564 * descriptor close, not protocol-sourced closes, because the
1565 * reader process may not have drained the data yet!
1566 */
1567 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1568 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001569 tcp_hdr(skb)->fin;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 data_was_unread += len;
1571 __kfree_skb(skb);
1572 }
1573
1574 sk_stream_mem_reclaim(sk);
1575
1576 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1577 * 3.10, we send a RST here because data was lost. To
1578 * witness the awful effects of the old behavior of always
1579 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1580 * a bulk GET in an FTP client, suspend the process, wait
1581 * for the client to advertise a zero window, then kill -9
1582 * the FTP client, wheee... Note: timeout is always zero
1583 * in such a case.
1584 */
1585 if (data_was_unread) {
1586 /* Unread data was tossed, zap the connection. */
1587 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1588 tcp_set_state(sk, TCP_CLOSE);
1589 tcp_send_active_reset(sk, GFP_KERNEL);
1590 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1591 /* Check zero linger _after_ checking for unread data. */
1592 sk->sk_prot->disconnect(sk, 0);
1593 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1594 } else if (tcp_close_state(sk)) {
1595 /* We FIN if the application ate all the data before
1596 * zapping the connection.
1597 */
1598
 1599 /* RED-PEN. Formally speaking, we have broken the TCP state
 1600 * machine here. The state transitions:
 1601 *
 1602 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
 1603 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
 1604 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
 1605 *
 1606 * are legal only when the FIN has actually been sent (i.e. is in
 1607 * window), rather than queued out of window. Purists may complain.
 1608 *
 1609 * E.g. the "RFC state" is ESTABLISHED
 1610 * if the Linux state is FIN-WAIT-1 but the FIN has not been sent yet.
 1611 *
 1612 * The visible deviations are that we sometimes
 1613 * enter TIME-WAIT when it is not really required
 1614 * (harmless), and do not send active resets when they are
 1615 * required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when
 1616 * they look like CLOSING or LAST_ACK to Linux).
 1617 * I have probably missed some more loopholes.
 1618 * --ANK
 1619 */
1620 tcp_send_fin(sk);
1621 }
1622
1623 sk_stream_wait_close(sk, timeout);
1624
1625adjudge_to_death:
Herbert Xu75c2d9072006-05-03 23:31:35 -07001626 state = sk->sk_state;
1627 sock_hold(sk);
1628 sock_orphan(sk);
1629 atomic_inc(sk->sk_prot->orphan_count);
1630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 /* It is the last release_sock in its life. It will remove backlog. */
1632 release_sock(sk);
1633
1634
 1635 /* Now the socket is owned by the kernel and we acquire the BH lock
 1636  * to finish the close. No need to check for user refs.
 1637  */
1638 local_bh_disable();
1639 bh_lock_sock(sk);
1640 BUG_TRAP(!sock_owned_by_user(sk));
1641
Herbert Xu75c2d9072006-05-03 23:31:35 -07001642 /* Have we already been destroyed by a softirq or backlog? */
1643 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1644 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645
 1646 /* This is a (useful) BSD violation of the RFC. There is a
 1647 * problem with TCP as specified, in that the other end could
 1648 * keep a socket open forever with no application left on this end.
 1649 * We use a 3 minute timeout (about the same as BSD) and then kill
 1650 * our end. If they send after that then tough - BUT: it is long
 1651 * enough that we won't repeat the old "4*rto = almost no time"
 1652 * reset mistake.
 1653 *
 1654 * Nope, it was not a mistake. It is really desired behaviour,
 1655 * e.g. on HTTP servers, where such sockets are useless but
 1656 * consume significant resources. Let's do it with the special
 1657 * linger2 option. --ANK
 1658 */
1659
1660 if (sk->sk_state == TCP_FIN_WAIT2) {
1661 struct tcp_sock *tp = tcp_sk(sk);
1662 if (tp->linger2 < 0) {
1663 tcp_set_state(sk, TCP_CLOSE);
1664 tcp_send_active_reset(sk, GFP_ATOMIC);
1665 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1666 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001667 const int tmo = tcp_fin_time(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668
1669 if (tmo > TCP_TIMEWAIT_LEN) {
David S. Miller52499af2006-07-31 22:32:09 -07001670 inet_csk_reset_keepalive_timer(sk,
1671 tmo - TCP_TIMEWAIT_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1674 goto out;
1675 }
1676 }
1677 }
1678 if (sk->sk_state != TCP_CLOSE) {
1679 sk_stream_mem_reclaim(sk);
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001680 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1682 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1683 if (net_ratelimit())
 1684 printk(KERN_INFO "TCP: too many orphaned "
 1685 "sockets\n");
1686 tcp_set_state(sk, TCP_CLOSE);
1687 tcp_send_active_reset(sk, GFP_ATOMIC);
1688 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1689 }
1690 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
1692 if (sk->sk_state == TCP_CLOSE)
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001693 inet_csk_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 /* Otherwise, socket is reprieved until protocol close. */
1695
1696out:
1697 bh_unlock_sock(sk);
1698 local_bh_enable();
1699 sock_put(sk);
1700}
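/*
 * Illustrative user-space sketch (not part of this file): two visible
 * consequences of tcp_close() above, assuming fd is a connected TCP
 * socket and <sys/socket.h> is included:
 *
 *	// 1. If receive data is still unread, close() aborts the
 *	//    connection with a RST (the draft-ietf-tcpimpl-prob case).
 *	close(fd);
 *
 *	// 2. A zero linger time requests the abortive close explicitly,
 *	//    taking the disconnect() branch above:
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);
 */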
1701
1702/* These states need RST on ABORT according to RFC793 */
1703
1704static inline int tcp_need_reset(int state)
1705{
1706 return (1 << state) &
1707 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1708 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1709}
1710
1711int tcp_disconnect(struct sock *sk, int flags)
1712{
1713 struct inet_sock *inet = inet_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001714 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 struct tcp_sock *tp = tcp_sk(sk);
1716 int err = 0;
1717 int old_state = sk->sk_state;
1718
1719 if (old_state != TCP_CLOSE)
1720 tcp_set_state(sk, TCP_CLOSE);
1721
1722 /* ABORT function of RFC793 */
1723 if (old_state == TCP_LISTEN) {
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001724 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 } else if (tcp_need_reset(old_state) ||
1726 (tp->snd_nxt != tp->write_seq &&
1727 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001728 /* The last check adjusts for the discrepancy between Linux and
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 * RFC states
 1730 */
1731 tcp_send_active_reset(sk, gfp_any());
1732 sk->sk_err = ECONNRESET;
1733 } else if (old_state == TCP_SYN_SENT)
1734 sk->sk_err = ECONNRESET;
1735
1736 tcp_clear_xmit_timers(sk);
1737 __skb_queue_purge(&sk->sk_receive_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001738 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 __skb_queue_purge(&tp->out_of_order_queue);
Chris Leech1a2449a2006-05-23 18:05:53 -07001740#ifdef CONFIG_NET_DMA
1741 __skb_queue_purge(&sk->sk_async_wait_queue);
1742#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743
1744 inet->dport = 0;
1745
1746 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1747 inet_reset_saddr(sk);
1748
1749 sk->sk_shutdown = 0;
1750 sock_reset_flag(sk, SOCK_DONE);
1751 tp->srtt = 0;
1752 if ((tp->write_seq += tp->max_window + 2) == 0)
1753 tp->write_seq = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001754 icsk->icsk_backoff = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 tp->snd_cwnd = 2;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001756 icsk->icsk_probes_out = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 tp->packets_out = 0;
1758 tp->snd_ssthresh = 0x7fffffff;
1759 tp->snd_cwnd_cnt = 0;
Stephen Hemminger9772efb2005-11-10 17:09:53 -08001760 tp->bytes_acked = 0;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001761 tcp_set_ca_state(sk, TCP_CA_Open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 tcp_clear_retrans(tp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001763 inet_csk_delack_init(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001764 tcp_init_send_head(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 tp->rx_opt.saw_tstamp = 0;
1766 tcp_sack_reset(&tp->rx_opt);
1767 __sk_dst_reset(sk);
1768
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001769 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771 sk->sk_error_report(sk);
1772 return err;
1773}
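/*
 * Illustrative user-space sketch (not part of this file): user space
 * reaches tcp_disconnect() by connecting to AF_UNSPEC, which dissolves
 * the association and returns the socket to TCP_CLOSE:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	if (connect(fd, &sa, sizeof(sa)) < 0)
 *		perror("disconnect");
 */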
1774
1775/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 * Socket option code for TCP.
1777 */
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001778static int do_tcp_setsockopt(struct sock *sk, int level,
1779 int optname, char __user *optval, int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780{
1781 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001782 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 int val;
1784 int err = 0;
1785
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001786 /* This is a string value; all the other options are ints */
1787 if (optname == TCP_CONGESTION) {
1788 char name[TCP_CA_NAME_MAX];
1789
1790 if (optlen < 1)
1791 return -EINVAL;
1792
1793 val = strncpy_from_user(name, optval,
1794 min(TCP_CA_NAME_MAX-1, optlen));
1795 if (val < 0)
1796 return -EFAULT;
1797 name[val] = 0;
1798
1799 lock_sock(sk);
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001800 err = tcp_set_congestion_control(sk, name);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001801 release_sock(sk);
1802 return err;
1803 }
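/*
 * Illustrative user-space sketch (not part of this file): selecting a
 * congestion control algorithm through the string-valued option above,
 * assuming the named algorithm is available in the running kernel:
 *
 *	const char *alg = "cubic";
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, alg, strlen(alg));
 */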
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 if (optlen < sizeof(int))
1806 return -EINVAL;
1807
1808 if (get_user(val, (int __user *)optval))
1809 return -EFAULT;
1810
1811 lock_sock(sk);
1812
1813 switch (optname) {
1814 case TCP_MAXSEG:
 1815 /* Values greater than the interface MTU won't take effect. However,
 1816 * at the point when this call is made we typically don't yet
 1817 * know which interface is going to be used. */
1818 if (val < 8 || val > MAX_TCP_WINDOW) {
1819 err = -EINVAL;
1820 break;
1821 }
1822 tp->rx_opt.user_mss = val;
1823 break;
1824
1825 case TCP_NODELAY:
1826 if (val) {
1827 /* TCP_NODELAY is weaker than TCP_CORK, so that
1828 * this option on corked socket is remembered, but
1829 * it is not activated until cork is cleared.
1830 *
1831 * However, when TCP_NODELAY is set we make
1832 * an explicit push, which overrides even TCP_CORK
1833 * for currently queued segments.
1834 */
1835 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001836 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 } else {
1838 tp->nonagle &= ~TCP_NAGLE_OFF;
1839 }
1840 break;
1841
1842 case TCP_CORK:
 1843 /* When set, this tells TCP to always queue non-full frames.
1844 * Later the user clears this option and we transmit
1845 * any pending partial frames in the queue. This is
1846 * meant to be used alongside sendfile() to get properly
1847 * filled frames when the user (for example) must write
1848 * out headers with a write() call first and then use
1849 * sendfile to send out the data parts.
1850 *
1851 * TCP_CORK can be set together with TCP_NODELAY and it is
1852 * stronger than TCP_NODELAY.
1853 */
1854 if (val) {
1855 tp->nonagle |= TCP_NAGLE_CORK;
1856 } else {
1857 tp->nonagle &= ~TCP_NAGLE_CORK;
1858 if (tp->nonagle&TCP_NAGLE_OFF)
1859 tp->nonagle |= TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001860 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 }
1862 break;
1863
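/*
 * Illustrative user-space sketch (not part of this file): the intended
 * TCP_CORK pattern - cork, write the header, sendfile() the body, then
 * uncork to flush the final partial frame.  hdr, hdr_len, file_fd and
 * file_len are assumed to exist:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);
 *	sendfile(fd, file_fd, NULL, file_len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 */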
1864 case TCP_KEEPIDLE:
1865 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1866 err = -EINVAL;
1867 else {
1868 tp->keepalive_time = val * HZ;
1869 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1870 !((1 << sk->sk_state) &
1871 (TCPF_CLOSE | TCPF_LISTEN))) {
1872 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1873 if (tp->keepalive_time > elapsed)
1874 elapsed = tp->keepalive_time - elapsed;
1875 else
1876 elapsed = 0;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001877 inet_csk_reset_keepalive_timer(sk, elapsed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 }
1879 }
1880 break;
1881 case TCP_KEEPINTVL:
1882 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1883 err = -EINVAL;
1884 else
1885 tp->keepalive_intvl = val * HZ;
1886 break;
1887 case TCP_KEEPCNT:
1888 if (val < 1 || val > MAX_TCP_KEEPCNT)
1889 err = -EINVAL;
1890 else
1891 tp->keepalive_probes = val;
1892 break;
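/*
 * Illustrative user-space sketch (not part of this file): combining the
 * three keepalive options above so a dead peer is declared after about
 * 60 + 3 * 10 seconds; SO_KEEPALIVE must be enabled for them to matter:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 3;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */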
1893 case TCP_SYNCNT:
1894 if (val < 1 || val > MAX_TCP_SYNCNT)
1895 err = -EINVAL;
1896 else
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001897 icsk->icsk_syn_retries = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 break;
1899
1900 case TCP_LINGER2:
1901 if (val < 0)
1902 tp->linger2 = -1;
1903 else if (val > sysctl_tcp_fin_timeout / HZ)
1904 tp->linger2 = 0;
1905 else
1906 tp->linger2 = val * HZ;
1907 break;
1908
1909 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07001910 icsk->icsk_accept_queue.rskq_defer_accept = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 if (val > 0) {
1912 /* Translate value in seconds to number of
1913 * retransmits */
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07001914 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 val > ((TCP_TIMEOUT_INIT / HZ) <<
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07001916 icsk->icsk_accept_queue.rskq_defer_accept))
1917 icsk->icsk_accept_queue.rskq_defer_accept++;
1918 icsk->icsk_accept_queue.rskq_defer_accept++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 }
1920 break;
1921
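/*
 * Illustrative user-space sketch (not part of this file): with
 * TCP_DEFER_ACCEPT on a listening socket, accept() does not complete
 * for a connection until data arrives on it; the value in seconds is
 * converted above into a retransmit count:
 *
 *	int secs = 5;
 *
 *	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
 *		   &secs, sizeof(secs));
 */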
1922 case TCP_WINDOW_CLAMP:
1923 if (!val) {
1924 if (sk->sk_state != TCP_CLOSE) {
1925 err = -EINVAL;
1926 break;
1927 }
1928 tp->window_clamp = 0;
1929 } else
1930 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1931 SOCK_MIN_RCVBUF / 2 : val;
1932 break;
1933
1934 case TCP_QUICKACK:
1935 if (!val) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001936 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001938 icsk->icsk_ack.pingpong = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 if ((1 << sk->sk_state) &
1940 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001941 inet_csk_ack_scheduled(sk)) {
1942 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
Chris Leech0e4b4992006-05-23 18:00:16 -07001943 tcp_cleanup_rbuf(sk, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 if (!(val & 1))
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001945 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 }
1947 }
1948 break;
1949
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001950#ifdef CONFIG_TCP_MD5SIG
1951 case TCP_MD5SIG:
1952 /* Read the IP->Key mappings from userspace */
1953 err = tp->af_specific->md5_parse(sk, optval, optlen);
1954 break;
1955#endif
1956
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 default:
1958 err = -ENOPROTOOPT;
1959 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001960 }
1961
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 release_sock(sk);
1963 return err;
1964}
1965
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001966int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1967 int optlen)
1968{
1969 struct inet_connection_sock *icsk = inet_csk(sk);
1970
1971 if (level != SOL_TCP)
1972 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1973 optval, optlen);
1974 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1975}
1976
1977#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001978int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1979 char __user *optval, int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001980{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08001981 if (level != SOL_TCP)
1982 return inet_csk_compat_setsockopt(sk, level, optname,
1983 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001984 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1985}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001986
1987EXPORT_SYMBOL(compat_tcp_setsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001988#endif
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990/* Return information about state of tcp endpoint in API format. */
1991void tcp_get_info(struct sock *sk, struct tcp_info *info)
1992{
1993 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001994 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 u32 now = tcp_time_stamp;
1996
1997 memset(info, 0, sizeof(*info));
1998
1999 info->tcpi_state = sk->sk_state;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002000 info->tcpi_ca_state = icsk->icsk_ca_state;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002001 info->tcpi_retransmits = icsk->icsk_retransmits;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002002 info->tcpi_probes = icsk->icsk_probes_out;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002003 info->tcpi_backoff = icsk->icsk_backoff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
2005 if (tp->rx_opt.tstamp_ok)
2006 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2007 if (tp->rx_opt.sack_ok)
2008 info->tcpi_options |= TCPI_OPT_SACK;
2009 if (tp->rx_opt.wscale_ok) {
2010 info->tcpi_options |= TCPI_OPT_WSCALE;
2011 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2012 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002013 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
2015 if (tp->ecn_flags&TCP_ECN_OK)
2016 info->tcpi_options |= TCPI_OPT_ECN;
2017
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002018 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2019 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002020 info->tcpi_snd_mss = tp->mss_cache;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002021 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022
2023 info->tcpi_unacked = tp->packets_out;
2024 info->tcpi_sacked = tp->sacked_out;
2025 info->tcpi_lost = tp->lost_out;
2026 info->tcpi_retrans = tp->retrans_out;
2027 info->tcpi_fackets = tp->fackets_out;
2028
2029 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002030 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2032
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002033 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2035 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2036 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2037 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2038 info->tcpi_snd_cwnd = tp->snd_cwnd;
2039 info->tcpi_advmss = tp->advmss;
2040 info->tcpi_reordering = tp->reordering;
2041
2042 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2043 info->tcpi_rcv_space = tp->rcvq_space.space;
2044
2045 info->tcpi_total_retrans = tp->total_retrans;
2046}
2047
2048EXPORT_SYMBOL_GPL(tcp_get_info);
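/*
 * Illustrative user-space sketch (not part of this file): tcp_get_info()
 * is reached through getsockopt(TCP_INFO); e.g. sampling the smoothed
 * RTT and congestion window, assuming <netinet/tcp.h> provides struct
 * tcp_info:
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
 *		printf("rtt %u us cwnd %u\n",
 *		       info.tcpi_rtt, info.tcpi_snd_cwnd);
 */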
2049
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002050static int do_tcp_getsockopt(struct sock *sk, int level,
2051 int optname, char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002053 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 struct tcp_sock *tp = tcp_sk(sk);
2055 int val, len;
2056
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 if (get_user(len, optlen))
2058 return -EFAULT;
2059
2060 len = min_t(unsigned int, len, sizeof(int));
2061
2062 if (len < 0)
2063 return -EINVAL;
2064
2065 switch (optname) {
2066 case TCP_MAXSEG:
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002067 val = tp->mss_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2069 val = tp->rx_opt.user_mss;
2070 break;
2071 case TCP_NODELAY:
2072 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2073 break;
2074 case TCP_CORK:
2075 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2076 break;
2077 case TCP_KEEPIDLE:
2078 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2079 break;
2080 case TCP_KEEPINTVL:
2081 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2082 break;
2083 case TCP_KEEPCNT:
2084 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2085 break;
2086 case TCP_SYNCNT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002087 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 break;
2089 case TCP_LINGER2:
2090 val = tp->linger2;
2091 if (val >= 0)
2092 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2093 break;
2094 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002095 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2096 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 break;
2098 case TCP_WINDOW_CLAMP:
2099 val = tp->window_clamp;
2100 break;
2101 case TCP_INFO: {
2102 struct tcp_info info;
2103
2104 if (get_user(len, optlen))
2105 return -EFAULT;
2106
2107 tcp_get_info(sk, &info);
2108
2109 len = min_t(unsigned int, len, sizeof(info));
2110 if (put_user(len, optlen))
2111 return -EFAULT;
2112 if (copy_to_user(optval, &info, len))
2113 return -EFAULT;
2114 return 0;
2115 }
2116 case TCP_QUICKACK:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002117 val = !icsk->icsk_ack.pingpong;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 break;
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002119
2120 case TCP_CONGESTION:
2121 if (get_user(len, optlen))
2122 return -EFAULT;
2123 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2124 if (put_user(len, optlen))
2125 return -EFAULT;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002126 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002127 return -EFAULT;
2128 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 default:
2130 return -ENOPROTOOPT;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002131 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
2133 if (put_user(len, optlen))
2134 return -EFAULT;
2135 if (copy_to_user(optval, &val, len))
2136 return -EFAULT;
2137 return 0;
2138}
2139
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002140int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2141 int __user *optlen)
2142{
2143 struct inet_connection_sock *icsk = inet_csk(sk);
2144
2145 if (level != SOL_TCP)
2146 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2147 optval, optlen);
2148 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2149}
2150
2151#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002152int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2153 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002154{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002155 if (level != SOL_TCP)
2156 return inet_csk_compat_getsockopt(sk, level, optname,
2157 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002158 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2159}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002160
2161EXPORT_SYMBOL(compat_tcp_getsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002162#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163
Herbert Xu576a30e2006-06-27 13:22:38 -07002164struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07002165{
2166 struct sk_buff *segs = ERR_PTR(-EINVAL);
2167 struct tcphdr *th;
2168 unsigned thlen;
2169 unsigned int seq;
Al Virod3bc23e2006-11-14 21:24:49 -08002170 __be32 delta;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002171 unsigned int oldlen;
2172 unsigned int len;
2173
2174 if (!pskb_may_pull(skb, sizeof(*th)))
2175 goto out;
2176
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002177 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002178 thlen = th->doff * 4;
2179 if (thlen < sizeof(*th))
2180 goto out;
2181
2182 if (!pskb_may_pull(skb, thlen))
2183 goto out;
2184
Herbert Xu0718bcc2006-06-25 23:55:46 -07002185 oldlen = (u16)~skb->len;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002186 __skb_pull(skb, thlen);
2187
Herbert Xu3820c3f2006-06-29 20:11:25 -07002188 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2189 /* Packet is from an untrusted source, reset gso_segs. */
Herbert Xubbcf4672006-07-03 19:38:35 -07002190 int type = skb_shinfo(skb)->gso_type;
2191 int mss;
Herbert Xu3820c3f2006-06-29 20:11:25 -07002192
Herbert Xubbcf4672006-07-03 19:38:35 -07002193 if (unlikely(type &
2194 ~(SKB_GSO_TCPV4 |
2195 SKB_GSO_DODGY |
2196 SKB_GSO_TCP_ECN |
2197 SKB_GSO_TCPV6 |
2198 0) ||
2199 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2200 goto out;
2201
2202 mss = skb_shinfo(skb)->gso_size;
Herbert Xu3820c3f2006-06-29 20:11:25 -07002203 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2204
2205 segs = NULL;
2206 goto out;
2207 }
2208
Herbert Xu576a30e2006-06-27 13:22:38 -07002209 segs = skb_segment(skb, features);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002210 if (IS_ERR(segs))
2211 goto out;
2212
2213 len = skb_shinfo(skb)->gso_size;
Herbert Xu0718bcc2006-06-25 23:55:46 -07002214 delta = htonl(oldlen + (thlen + len));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002215
2216 skb = segs;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002217 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002218 seq = ntohl(th->seq);
2219
2220 do {
2221 th->fin = th->psh = 0;
2222
Al Virod3bc23e2006-11-14 21:24:49 -08002223 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2224 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002225 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002226 th->check =
2227 csum_fold(csum_partial(skb_transport_header(skb),
2228 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002229
2230 seq += len;
2231 skb = skb->next;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002232 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002233
2234 th->seq = htonl(seq);
2235 th->cwr = 0;
2236 } while (skb->next);
2237
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002238 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002239 skb->data_len);
Al Virod3bc23e2006-11-14 21:24:49 -08002240 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2241 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002242 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002243 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2244 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002245
2246out:
2247 return segs;
2248}
Herbert Xuadcfc7d2006-06-30 13:36:15 -07002249EXPORT_SYMBOL(tcp_tso_segment);
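/*
 * Aside (sketch, not part of this file): the th->check fixups in
 * tcp_tso_segment() are the RFC 1624 incremental checksum update -
 * rather than re-summing every segment, they fold "~old_len + new_len"
 * into the existing sum.  A host-order toy version of that arithmetic:
 *
 *	static u16 csum_adjust_len(u16 check, u16 old_len, u16 new_len)
 *	{
 *		u32 sum = (u16)~check + (u16)~old_len + new_len;
 *
 *		sum = (sum & 0xffff) + (sum >> 16);	// fold carries
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		return (u16)~sum;
 *	}
 */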
Herbert Xuf4c50d92006-06-22 03:02:40 -07002250
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002251#ifdef CONFIG_TCP_MD5SIG
2252static unsigned long tcp_md5sig_users;
2253static struct tcp_md5sig_pool **tcp_md5sig_pool;
2254static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2255
2256static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2257{
2258 int cpu;
2259 for_each_possible_cpu(cpu) {
2260 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2261 if (p) {
2262 if (p->md5_desc.tfm)
2263 crypto_free_hash(p->md5_desc.tfm);
2264 kfree(p);
2265 p = NULL;
2266 }
2267 }
2268 free_percpu(pool);
2269}
2270
2271void tcp_free_md5sig_pool(void)
2272{
2273 struct tcp_md5sig_pool **pool = NULL;
2274
David S. Miller2c4f6212007-02-20 23:51:47 -08002275 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002276 if (--tcp_md5sig_users == 0) {
2277 pool = tcp_md5sig_pool;
2278 tcp_md5sig_pool = NULL;
2279 }
David S. Miller2c4f6212007-02-20 23:51:47 -08002280 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002281 if (pool)
2282 __tcp_free_md5sig_pool(pool);
2283}
2284
2285EXPORT_SYMBOL(tcp_free_md5sig_pool);
2286
Adrian Bunkf5b99bc2006-11-30 17:22:29 -08002287static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002288{
2289 int cpu;
2290 struct tcp_md5sig_pool **pool;
2291
2292 pool = alloc_percpu(struct tcp_md5sig_pool *);
2293 if (!pool)
2294 return NULL;
2295
2296 for_each_possible_cpu(cpu) {
2297 struct tcp_md5sig_pool *p;
2298 struct crypto_hash *hash;
2299
2300 p = kzalloc(sizeof(*p), GFP_KERNEL);
2301 if (!p)
2302 goto out_free;
2303 *per_cpu_ptr(pool, cpu) = p;
2304
2305 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2306 if (!hash || IS_ERR(hash))
2307 goto out_free;
2308
2309 p->md5_desc.tfm = hash;
2310 }
2311 return pool;
2312out_free:
2313 __tcp_free_md5sig_pool(pool);
2314 return NULL;
2315}
2316
2317struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2318{
2319 struct tcp_md5sig_pool **pool;
2320 int alloc = 0;
2321
2322retry:
David S. Miller2c4f6212007-02-20 23:51:47 -08002323 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002324 pool = tcp_md5sig_pool;
2325 if (tcp_md5sig_users++ == 0) {
2326 alloc = 1;
David S. Miller2c4f6212007-02-20 23:51:47 -08002327 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002328 } else if (!pool) {
2329 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002330 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002331 cpu_relax();
2332 goto retry;
2333 } else
David S. Miller2c4f6212007-02-20 23:51:47 -08002334 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002335
2336 if (alloc) {
 2337 /* We cannot hold the spinlock here because the allocation may sleep. */
2338 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
David S. Miller2c4f6212007-02-20 23:51:47 -08002339 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002340 if (!p) {
2341 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002342 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002343 return NULL;
2344 }
2345 pool = tcp_md5sig_pool;
2346 if (pool) {
2347 /* oops, it has already been assigned. */
David S. Miller2c4f6212007-02-20 23:51:47 -08002348 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002349 __tcp_free_md5sig_pool(p);
2350 } else {
2351 tcp_md5sig_pool = pool = p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002352 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002353 }
2354 }
2355 return pool;
2356}
2357
2358EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
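/*
 * Aside (sketch, not part of this file): tcp_alloc_md5sig_pool() uses
 * the classic "allocate outside the lock, re-check under the lock"
 * pattern because the allocation may sleep.  The shape of the pattern,
 * with allocate/release/shared/lock invented for the sketch:
 *
 *	void *new, *free_later = NULL;
 *
 *	new = allocate();			// may sleep: no lock held
 *	spin_lock_bh(&lock);
 *	if (!shared)
 *		shared = new;			// we won the race
 *	else
 *		free_later = new;		// lost the race, discard ours
 *	spin_unlock_bh(&lock);
 *	if (free_later)
 *		release(free_later);		// never free under the lock
 */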
2359
2360struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2361{
2362 struct tcp_md5sig_pool **p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002363 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002364 p = tcp_md5sig_pool;
2365 if (p)
2366 tcp_md5sig_users++;
David S. Miller2c4f6212007-02-20 23:51:47 -08002367 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002368 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2369}
2370
2371EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2372
David S. Miller6931ba72006-12-13 16:25:44 -08002373void __tcp_put_md5sig_pool(void)
2374{
2375 tcp_free_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002376}
2377
2378EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2379#endif
2380
Andi Kleen4ac02ba2007-04-20 17:11:46 -07002381void tcp_done(struct sock *sk)
2382{
 2383 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2384 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
2385
2386 tcp_set_state(sk, TCP_CLOSE);
2387 tcp_clear_xmit_timers(sk);
2388
2389 sk->sk_shutdown = SHUTDOWN_MASK;
2390
2391 if (!sock_flag(sk, SOCK_DEAD))
2392 sk->sk_state_change(sk);
2393 else
2394 inet_csk_destroy_sock(sk);
2395}
2396EXPORT_SYMBOL_GPL(tcp_done);
2397
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398extern void __skb_cb_too_small_for_tcp(int, int);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002399extern struct tcp_congestion_ops tcp_reno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
2401static __initdata unsigned long thash_entries;
2402static int __init set_thash_entries(char *str)
2403{
2404 if (!str)
2405 return 0;
2406 thash_entries = simple_strtoul(str, &str, 0);
2407 return 1;
2408}
2409__setup("thash_entries=", set_thash_entries);
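/*
 * Illustration (not part of this file): the established-table size can
 * be pinned on the kernel command line instead of being derived from
 * available memory, e.g.:
 *
 *	linux ... thash_entries=131072
 */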
2410
2411void __init tcp_init(void)
2412{
2413 struct sk_buff *skb = NULL;
John Heffner7b4f4b52006-03-25 01:34:07 -08002414 unsigned long limit;
2415 int order, i, max_share;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416
2417 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2418 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2419 sizeof(skb->cb));
2420
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002421 tcp_hashinfo.bind_bucket_cachep =
2422 kmem_cache_create("tcp_bind_bucket",
2423 sizeof(struct inet_bind_bucket), 0,
Alexey Dobriyane5d679f2006-08-26 19:25:52 -07002424 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 /* Size and allocate the main established and bind bucket
2427 * hash tables.
2428 *
2429 * The methodology is similar to that of the buffer cache.
2430 */
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002431 tcp_hashinfo.ehash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 alloc_large_system_hash("TCP established",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002433 sizeof(struct inet_ehash_bucket),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 thash_entries,
2435 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002436 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002437 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002438 &tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 NULL,
2440 0);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002441 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2442 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002443 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2444 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002445 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 }
2447
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002448 tcp_hashinfo.bhash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 alloc_large_system_hash("TCP bind",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002450 sizeof(struct inet_bind_hashbucket),
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002451 tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002453 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002454 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002455 &tcp_hashinfo.bhash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 NULL,
2457 64 * 1024);
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002458 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2459 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2460 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2461 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 }
2463
2464 /* Try to be a bit smarter and adjust defaults depending
2465 * on available memory.
2466 */
2467 for (order = 0; ((1 << order) << PAGE_SHIFT) <
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002468 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 order++)
2470 ;
Andi Kleene7626482005-06-13 14:24:52 -07002471 if (order >= 4) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 sysctl_local_port_range[0] = 32768;
2473 sysctl_local_port_range[1] = 61000;
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002474 tcp_death_row.sysctl_max_tw_buckets = 180000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 sysctl_tcp_max_orphans = 4096 << (order - 4);
2476 sysctl_max_syn_backlog = 1024;
2477 } else if (order < 3) {
2478 sysctl_local_port_range[0] = 1024 * (3 - order);
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002479 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 sysctl_tcp_max_orphans >>= (3 - order);
2481 sysctl_max_syn_backlog = 128;
2482 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
John Heffner53cdcc02007-03-16 15:04:03 -07002484 /* Set the pressure threshold to be a fraction of global memory that
2485 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
2486 * memory, with a floor of 128 pages.
2487 */
2488 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2489 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2490 limit = max(limit, 128UL);
2491 sysctl_tcp_mem[0] = limit / 4 * 3;
2492 sysctl_tcp_mem[1] = limit;
John Heffner52bf3762006-11-14 20:25:17 -08002493 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
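/*
 * Worked example (not part of this file), assuming 4 KB pages
 * (PAGE_SHIFT = 12) on a 256 MB machine (nr_all_pages = 65536):
 *
 *	limit = min(65536, 1 << 16) >> 8	=   256
 *	limit = (256 * (65536 >> 8)) >> 1	= 32768 pages = 128 MB
 *
 * so the pressure point sysctl_tcp_mem[1] lands at half of memory,
 * matching the comment above; the 128-page floor only matters on very
 * small machines.
 */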
John Heffner53cdcc02007-03-16 15:04:03 -07002495 /* Set per-socket limits to no more than 1/128 the pressure threshold */
John Heffner7b4f4b52006-03-25 01:34:07 -08002496 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2497 max_share = min(4UL*1024*1024, limit);
2498
2499 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2500 sysctl_tcp_wmem[1] = 16*1024;
2501 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2502
2503 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2504 sysctl_tcp_rmem[1] = 87380;
2505 sysctl_tcp_rmem[2] = max(87380, max_share);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506
2507 printk(KERN_INFO "TCP: Hash tables configured "
2508 "(established %d bind %d)\n",
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002509 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07002510
2511 tcp_register_congestion_control(&tcp_reno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512}
2513
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514EXPORT_SYMBOL(tcp_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515EXPORT_SYMBOL(tcp_disconnect);
2516EXPORT_SYMBOL(tcp_getsockopt);
2517EXPORT_SYMBOL(tcp_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518EXPORT_SYMBOL(tcp_poll);
2519EXPORT_SYMBOL(tcp_read_sock);
2520EXPORT_SYMBOL(tcp_recvmsg);
2521EXPORT_SYMBOL(tcp_sendmsg);
2522EXPORT_SYMBOL(tcp_sendpage);
2523EXPORT_SYMBOL(tcp_setsockopt);
2524EXPORT_SYMBOL(tcp_shutdown);
2525EXPORT_SYMBOL(tcp_statistics);