/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code, obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything, not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore-me flag on an rst
 *					receive, otherwise odd bits of prattle
 *					still escape
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's; for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH-
 *					only frames. At least one PC TCP stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
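
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * two common walks through the states described above, assuming a clean
 * shutdown in each direction.
 *
 *	active close:	ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2
 *					-> TIME_WAIT -> CLOSE
 *	passive close:	ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *
 * The simultaneous-close path goes FIN_WAIT1 -> CLOSING -> TIME_WAIT.
 */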

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All sk_stream_mem_schedule() accounting is of this nature: the
 * accounting is strict, while actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by the poll logic; correct handling of state changes made
	 * by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWRNORM flags, so somebody should check
	 * this all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making it impossible to poll()
	 * for write() in state CLOSE_WAIT. One solution is evident --- to
	 * set POLLHUP if and only if shutdown has been made in both
	 * directions. Actually, it is interesting to look at how Solaris
	 * and DUX solve this dilemma. I would prefer, if POLLHUP were
	 * maskable, that we could set it on SND_SHUTDOWN. BTW the examples
	 * given in Stevens' books assume exactly this behaviour; it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If the read of tp below
		 * escapes above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * the wspace test but before the flags are
				 * set, the IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
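
/*
 * Sketch of what the mask above means to a caller (editor's illustrative
 * userspace view, not part of the original file):
 *
 *	struct pollfd pfd = { .fd = fd,
 *			      .events = POLLIN | POLLOUT | POLLRDHUP };
 *	poll(&pfd, 1, -1);
 *	// peer sent FIN:   POLLIN | POLLRDNORM | POLLRDHUP are set
 *	// both directions shut down, or TCP_CLOSE: POLLHUP is set as well
 */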

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
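
/*
 * Userspace view of the two queue ioctls handled above (editor's
 * illustrative sketch, not part of the original file):
 *
 *	int inq, outq;
 *	ioctl(fd, SIOCINQ, &inq);	// rcv_nxt - copied_seq, minus a
 *					// queued FIN: readable bytes
 *	ioctl(fd, SIOCOUTQ, &outq);	// write_seq - snd_una: bytes
 *					// written but not yet acked
 */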

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_charge_skb(sk, skb);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}
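
/*
 * Worked example for forced_push() (editor's illustration, not in the
 * original): with max_window = 65535, a PSH is forced once write_seq has
 * advanced more than 32767 bytes past pushed_seq, so at most about half
 * a maximum window of unpushed data is ever left queued.
 */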

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}
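
/*
 * Illustrative note (editor's addition, not in the original): for a
 * GSO-capable SG device select_size() returns 0, so the allocation below
 * reserves header room only and all payload lands in page fragments.
 * For a plain SG device with a typical mss_cache of 1460 the value is
 * returned unchanged, since it usually sits below
 * SKB_MAX_HEAD(MAX_TCP_HEADER).
 */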

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok, commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate a new segment. If the interface
				 * is SG, allocate an skb fitting a single
				 * page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add a new fragment and
					 * cannot, because the interface is
					 * non-SG or all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far; it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied the read buffer, we send an ACK
		     * if the connection is not bidirectional, the user
		     * drained the receive buffer, and there was a small
		     * segment in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if the window is raised up to infinity, do not send a
	 * window-open ACK in states where we will not receive more.
	 * It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send an ACK now if this read freed lots of space
			 * in our buffer. We can advertise the new window
			 * now if it is not less than the current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t len;
			int used;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			/* used must be signed: a negative return from the
			 * actor is an error, not a byte count. */
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}
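
/*
 * Minimal recv_actor sketch for tcp_read_sock() (editor's illustration,
 * assuming the sk_read_actor_t signature of this kernel generation; the
 * function name below is hypothetical):
 *
 *	static int count_bytes_actor(read_descriptor_t *desc,
 *				     struct sk_buff *skb,
 *				     unsigned int offset, size_t len)
 *	{
 *		size_t used = min_t(size_t, len, desc->count);
 *		desc->count -= used;	// desc->count limits total intake
 *		return used;		// bytes consumed from this skb
 *	}
 */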

/*
 * This routine copies from a sock struct into the user buffer.
 *
 * Technical note: in 2.3 we work on a _locked_ socket, so that
 * tricks with *seq access order and skb->users are not required.
 * Probably, the code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    __get_cpu_var(softnet_data).net_dma) {
			preempt_enable_no_resched();
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		} else {
			preempt_enable_no_resched();
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when the user tries to
					 * read from a never-connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing the socket, otherwise
			 * order will be broken at the second iteration.
			 * A more elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have an empty
			 * receive_queue. But prequeue _can_ be not empty
			 * after the 2nd iteration, when we jumped to the
			 * start of the loop because backlog processing
			 * added something to receive_queue.
			 * We cannot release_sock(), because the backlog
			 * contains packets that arrived _after_ the
			 * prequeued ones.
			 *
			 * In short, the algorithm is clear --- process all
			 * the queues in order. We could make it more direct
			 * by requeueing packets from backlog to prequeue, if
			 * it is not empty. It is more elegant, but eats
			 * cycles, unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on a connected socket. I was just happy when I found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static const unsigned char new_state[16] = {
  /* current state:	   new state:		action:		*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
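
/*
 * Example of the table above (editor's illustration, not in the
 * original): tcp_close_state() on an ESTABLISHED socket moves it to
 * FIN_WAIT1 and returns TCP_ACTION_FIN, so the caller sends a FIN; on a
 * SYN_SENT socket it moves straight to CLOSE with no FIN to send.
 */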

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets. FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}

void tcp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;
        int data_was_unread = 0;
        int state;

        lock_sock(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == TCP_LISTEN) {
                tcp_set_state(sk, TCP_CLOSE);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /* We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
                          tcp_hdr(skb)->fin;
                data_was_unread += len;
                __kfree_skb(skb);
        }

        sk_stream_mem_reclaim(sk);

        /* As outlined in RFC 2525, section 2.17, we send a RST here because
         * data was lost. To witness the awful effects of the old behavior of
         * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
         * GET in an FTP client, suspend the process, wait for the client to
         * advertise a zero window, then kill -9 the FTP client, wheee...
         * Note: timeout is always zero in such a case.
         */
        if (data_was_unread) {
                /* Unread data was tossed, zap the connection. */
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
                tcp_send_active_reset(sk, GFP_KERNEL);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
        } else if (tcp_close_state(sk)) {
                /* We FIN if the application ate all the data before
                 * zapping the connection.
                 */

                /* RED-PEN. Formally speaking, we have broken the TCP state
                 * machine. State transitions:
                 *
                 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
                 * TCP_SYN_RECV    -> TCP_FIN_WAIT1 (forget it, it's impossible)
                 * TCP_CLOSE_WAIT  -> TCP_LAST_ACK
                 *
                 * are legal only when the FIN has been sent (i.e. in window),
                 * rather than queued out of window. Purists blame.
                 *
                 * F.e. the "RFC state" is ESTABLISHED if the Linux state is
                 * FIN-WAIT-1, but the FIN has still not been sent.
                 *
                 * The visible deviations are that we sometimes enter the
                 * time-wait state when it is not really required (harmless),
                 * and do not send active resets when they are required by the
                 * specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, which look like
                 * CLOSING or LAST_ACK to Linux).
                 * Probably I have missed some more holelets.
                 * --ANK
                 */
                tcp_send_fin(sk);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);
        atomic_inc(sk->sk_prot->orphan_count);

        /* It is the last release_sock in its life. It will remove backlog. */
        release_sock(sk);


        /* Now the socket is owned by the kernel and we acquire the BH lock
         * to finish the close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        /* Have we already been destroyed by a softirq or backlog? */
        if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
                goto out;

        /* This is a (useful) BSD violation of the RFC. There is a
         * problem with TCP as specified in that the other end could
         * keep a socket open forever with no application left at this end.
         * We use a 3 minute timeout (about the same as BSD) then kill
         * our end. If they send after that then tough - BUT: long enough
         * that we won't make the old 4*rto = almost no time - whoops
         * reset mistake.
         *
         * Nope, it was not a mistake. It is really desired behaviour,
         * f.e. on http servers, where such sockets are useless but
         * consume significant resources. Let's do it with a special
         * linger2 option. --ANK
         */

        if (sk->sk_state == TCP_FIN_WAIT2) {
                struct tcp_sock *tp = tcp_sk(sk);
                if (tp->linger2 < 0) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
                } else {
                        const int tmo = tcp_fin_time(sk);

                        if (tmo > TCP_TIMEWAIT_LEN) {
                                inet_csk_reset_keepalive_timer(sk,
                                                tmo - TCP_TIMEWAIT_LEN);
                        } else {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
                sk_stream_mem_reclaim(sk);
                if (tcp_too_many_orphans(sk,
                                atomic_read(sk->sk_prot->orphan_count))) {
                        if (net_ratelimit())
                                printk(KERN_INFO "TCP: too many orphaned "
                                       "sockets\n");
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
                }
        }

        if (sk->sk_state == TCP_CLOSE)
                inet_csk_destroy_sock(sk);
        /* Otherwise, the socket is reprieved until protocol close. */

out:
        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}
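
/*
 * Illustration, userspace rather than kernel code: the zero-linger branch
 * above (SOCK_LINGER set, sk_lingertime == 0) is what a hard abort from
 * userspace looks like; close() then sends a RST instead of a FIN.
 * A sketch; assumes fd is a connected TCP socket, error handling omitted.
 */
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void abort_connection(int fd)
{
        struct linger lg;

        memset(&lg, 0, sizeof(lg));
        lg.l_onoff = 1;         /* linger enabled ... */
        lg.l_linger = 0;        /* ... with a zero timeout: abort on close */
        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
        close(fd);              /* RST; any unsent data is discarded */
}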

/* These states need RST on ABORT according to RFC 793 */

static inline int tcp_need_reset(int state)
{
        return (1 << state) &
               (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
                TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int err = 0;
        int old_state = sk->sk_state;

        if (old_state != TCP_CLOSE)
                tcp_set_state(sk, TCP_CLOSE);

        /* ABORT function of RFC 793 */
        if (old_state == TCP_LISTEN) {
                inet_csk_listen_stop(sk);
        } else if (tcp_need_reset(old_state) ||
                   (tp->snd_nxt != tp->write_seq &&
                    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
                /* The last check adjusts for the discrepancy between the
                 * Linux and RFC states.
                 */
                tcp_send_active_reset(sk, gfp_any());
                sk->sk_err = ECONNRESET;
        } else if (old_state == TCP_SYN_SENT)
                sk->sk_err = ECONNRESET;

        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        tcp_write_queue_purge(sk);
        __skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
        __skb_queue_purge(&sk->sk_async_wait_queue);
#endif

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);
        tp->srtt = 0;
        if ((tp->write_seq += tp->max_window + 2) == 0)
                tp->write_seq = 1;
        icsk->icsk_backoff = 0;
        tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
        tp->packets_out = 0;
        tp->snd_ssthresh = 0x7fffffff;
        tp->snd_cwnd_cnt = 0;
        tp->bytes_acked = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}
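
/*
 * Illustration, userspace rather than kernel code: on Linux,
 * tcp_disconnect() is reachable from userspace by calling connect() with
 * an AF_UNSPEC address on an existing socket, which dissolves the
 * association and returns the socket to an unconnected state. A sketch;
 * error handling omitted.
 */
#include <string.h>
#include <sys/socket.h>

static int dissolve(int fd)
{
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        return connect(fd, &sa, sizeof(sa));
}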

/*
 *      Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
                             int optname, char __user *optval, int optlen)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int val;
        int err = 0;

        /* This is a string value; all the others are ints. */
        if (optname == TCP_CONGESTION) {
                char name[TCP_CA_NAME_MAX];

                if (optlen < 1)
                        return -EINVAL;

                val = strncpy_from_user(name, optval,
                                        min(TCP_CA_NAME_MAX - 1, optlen));
                if (val < 0)
                        return -EFAULT;
                name[val] = 0;

                lock_sock(sk);
                err = tcp_set_congestion_control(sk, name);
                release_sock(sk);
                return err;
        }

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        lock_sock(sk);

        switch (optname) {
        case TCP_MAXSEG:
                /* Values greater than the interface MTU won't take effect.
                 * However, at the point when this call is made we typically
                 * don't yet know which interface is going to be used.
                 */
                if (val < 8 || val > MAX_TCP_WINDOW) {
                        err = -EINVAL;
                        break;
                }
                tp->rx_opt.user_mss = val;
                break;

        case TCP_NODELAY:
                if (val) {
                        /* TCP_NODELAY is weaker than TCP_CORK, so that
                         * this option on a corked socket is remembered, but
                         * it is not activated until the cork is cleared.
                         *
                         * However, when TCP_NODELAY is set we make
                         * an explicit push, which overrides even TCP_CORK
                         * for currently queued segments.
                         */
                        tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
                        tcp_push_pending_frames(sk);
                } else {
                        tp->nonagle &= ~TCP_NAGLE_OFF;
                }
                break;

        case TCP_CORK:
                /* When set, this option makes us always queue non-full
                 * frames. Later the user clears it and we transmit any
                 * pending partial frames in the queue. This is meant to
                 * be used alongside sendfile() to get properly filled
                 * frames when the user (for example) must write out
                 * headers with a write() call first and then use
                 * sendfile() to send out the data parts.
                 *
                 * TCP_CORK can be set together with TCP_NODELAY and it is
                 * stronger than TCP_NODELAY.
                 */
                if (val) {
                        tp->nonagle |= TCP_NAGLE_CORK;
                } else {
                        tp->nonagle &= ~TCP_NAGLE_CORK;
                        if (tp->nonagle & TCP_NAGLE_OFF)
                                tp->nonagle |= TCP_NAGLE_PUSH;
                        tcp_push_pending_frames(sk);
                }
                break;

        case TCP_KEEPIDLE:
                if (val < 1 || val > MAX_TCP_KEEPIDLE)
                        err = -EINVAL;
                else {
                        tp->keepalive_time = val * HZ;
                        if (sock_flag(sk, SOCK_KEEPOPEN) &&
                            !((1 << sk->sk_state) &
                              (TCPF_CLOSE | TCPF_LISTEN))) {
                                __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
                                if (tp->keepalive_time > elapsed)
                                        elapsed = tp->keepalive_time - elapsed;
                                else
                                        elapsed = 0;
                                inet_csk_reset_keepalive_timer(sk, elapsed);
                        }
                }
                break;
        case TCP_KEEPINTVL:
                if (val < 1 || val > MAX_TCP_KEEPINTVL)
                        err = -EINVAL;
                else
                        tp->keepalive_intvl = val * HZ;
                break;
        case TCP_KEEPCNT:
                if (val < 1 || val > MAX_TCP_KEEPCNT)
                        err = -EINVAL;
                else
                        tp->keepalive_probes = val;
                break;
        case TCP_SYNCNT:
                if (val < 1 || val > MAX_TCP_SYNCNT)
                        err = -EINVAL;
                else
                        icsk->icsk_syn_retries = val;
                break;

        case TCP_LINGER2:
                if (val < 0)
                        tp->linger2 = -1;
                else if (val > sysctl_tcp_fin_timeout / HZ)
                        tp->linger2 = 0;
                else
                        tp->linger2 = val * HZ;
                break;

        case TCP_DEFER_ACCEPT:
                icsk->icsk_accept_queue.rskq_defer_accept = 0;
                if (val > 0) {
                        /* Translate the value in seconds to a number of
                         * retransmits.
                         */
                        while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
                               val > ((TCP_TIMEOUT_INIT / HZ) <<
                                      icsk->icsk_accept_queue.rskq_defer_accept))
                                icsk->icsk_accept_queue.rskq_defer_accept++;
                        icsk->icsk_accept_queue.rskq_defer_accept++;
                }
                break;

        case TCP_WINDOW_CLAMP:
                if (!val) {
                        if (sk->sk_state != TCP_CLOSE) {
                                err = -EINVAL;
                                break;
                        }
                        tp->window_clamp = 0;
                } else
                        tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
                                           SOCK_MIN_RCVBUF / 2 : val;
                break;

        case TCP_QUICKACK:
                if (!val) {
                        icsk->icsk_ack.pingpong = 1;
                } else {
                        icsk->icsk_ack.pingpong = 0;
                        if ((1 << sk->sk_state) &
                            (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
                            inet_csk_ack_scheduled(sk)) {
                                icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
                                tcp_cleanup_rbuf(sk, 1);
                                if (!(val & 1))
                                        icsk->icsk_ack.pingpong = 1;
                        }
                }
                break;

#ifdef CONFIG_TCP_MD5SIG
        case TCP_MD5SIG:
                /* Read the IP->Key mappings from userspace */
                err = tp->af_specific->md5_parse(sk, optval, optlen);
                break;
#endif

        default:
                err = -ENOPROTOOPT;
                break;
        }

        release_sock(sk);
        return err;
}
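
/*
 * Illustration, userspace rather than kernel code: the header-then-payload
 * pattern the TCP_CORK comment above describes. The descriptor names and
 * lengths are placeholders; error handling omitted.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_header_and_file(int sock, const void *hdr, size_t hdrlen,
                                 int filefd, size_t filelen)
{
        int on = 1, off = 0;

        /* Cork: partial frames are queued, not sent. */
        setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));

        write(sock, hdr, hdrlen);               /* headers ... */
        sendfile(sock, filefd, NULL, filelen);  /* ... then the body */

        /* Uncork: any pending partial frame is pushed out now. */
        setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}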

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
                   int optlen)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (level != SOL_TCP)
                return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                                     optval, optlen);
        return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int optlen)
{
        if (level != SOL_TCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

/* Return information about the state of a TCP endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;

        memset(info, 0, sizeof(*info));

        info->tcpi_state = sk->sk_state;
        info->tcpi_ca_state = icsk->icsk_ca_state;
        info->tcpi_retransmits = icsk->icsk_retransmits;
        info->tcpi_probes = icsk->icsk_probes_out;
        info->tcpi_backoff = icsk->icsk_backoff;

        if (tp->rx_opt.tstamp_ok)
                info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
        if (tp->rx_opt.sack_ok)
                info->tcpi_options |= TCPI_OPT_SACK;
        if (tp->rx_opt.wscale_ok) {
                info->tcpi_options |= TCPI_OPT_WSCALE;
                info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
                info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
        }

        if (tp->ecn_flags & TCP_ECN_OK)
                info->tcpi_options |= TCPI_OPT_ECN;

        info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
        info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
        info->tcpi_snd_mss = tp->mss_cache;
        info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

        info->tcpi_unacked = tp->packets_out;
        info->tcpi_sacked = tp->sacked_out;
        info->tcpi_lost = tp->lost_out;
        info->tcpi_retrans = tp->retrans_out;
        info->tcpi_fackets = tp->fackets_out;

        info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
        info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
        info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

        info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
        info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
        info->tcpi_rtt = jiffies_to_usecs(tp->srtt) >> 3;
        info->tcpi_rttvar = jiffies_to_usecs(tp->mdev) >> 2;
        info->tcpi_snd_ssthresh = tp->snd_ssthresh;
        info->tcpi_snd_cwnd = tp->snd_cwnd;
        info->tcpi_advmss = tp->advmss;
        info->tcpi_reordering = tp->reordering;

        info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
        info->tcpi_rcv_space = tp->rcvq_space.space;

        info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
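
/*
 * Illustration, userspace rather than kernel code: the consumer of
 * tcp_get_info() is getsockopt(TCP_INFO), which copies the structure
 * filled above out to the caller. A sketch; assumes fd is a TCP socket
 * and that <netinet/tcp.h> provides struct tcp_info, error handling
 * omitted.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void dump_tcp_info(int fd)
{
        struct tcp_info info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                printf("rtt %uus rttvar %uus cwnd %u total_retrans %u\n",
                       info.tcpi_rtt, info.tcpi_rttvar,
                       info.tcpi_snd_cwnd, info.tcpi_total_retrans);
}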

static int do_tcp_getsockopt(struct sock *sk, int level,
                             int optname, char __user *optval, int __user *optlen)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        len = min_t(unsigned int, len, sizeof(int));

        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case TCP_MAXSEG:
                val = tp->mss_cache;
                if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                        val = tp->rx_opt.user_mss;
                break;
        case TCP_NODELAY:
                val = !!(tp->nonagle & TCP_NAGLE_OFF);
                break;
        case TCP_CORK:
                val = !!(tp->nonagle & TCP_NAGLE_CORK);
                break;
        case TCP_KEEPIDLE:
                val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
                break;
        case TCP_KEEPINTVL:
                val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
                break;
        case TCP_KEEPCNT:
                val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
                break;
        case TCP_SYNCNT:
                val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
                break;
        case TCP_LINGER2:
                val = tp->linger2;
                if (val >= 0)
                        val = (val ? : sysctl_tcp_fin_timeout) / HZ;
                break;
        case TCP_DEFER_ACCEPT:
                val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
                      ((TCP_TIMEOUT_INIT / HZ) <<
                       (icsk->icsk_accept_queue.rskq_defer_accept - 1));
                break;
        case TCP_WINDOW_CLAMP:
                val = tp->window_clamp;
                break;
        case TCP_INFO: {
                struct tcp_info info;

                if (get_user(len, optlen))
                        return -EFAULT;

                tcp_get_info(sk, &info);

                len = min_t(unsigned int, len, sizeof(info));
                if (put_user(len, optlen))
                        return -EFAULT;
                if (copy_to_user(optval, &info, len))
                        return -EFAULT;
                return 0;
        }
        case TCP_QUICKACK:
                val = !icsk->icsk_ack.pingpong;
                break;

        case TCP_CONGESTION:
                if (get_user(len, optlen))
                        return -EFAULT;
                len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
                if (put_user(len, optlen))
                        return -EFAULT;
                if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
                        return -EFAULT;
                return 0;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}
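
/*
 * Illustration, userspace rather than kernel code: TCP_CONGESTION is the
 * one string-valued option handled above. Reading it returns the name of
 * the current congestion control module; writing it selects another one
 * (choosing a module that is not generally allowed may require
 * privilege). A sketch; assumes fd is a TCP socket, error handling
 * mostly omitted.
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void show_and_set_cc(int fd)
{
        char name[16];  /* TCP_CA_NAME_MAX is 16 in the kernel */
        socklen_t len = sizeof(name);

        if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
                printf("current: %.*s\n", (int)len, name);

        setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno",
                   strlen("reno"));
}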

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
                   int __user *optlen)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (level != SOL_TCP)
                return icsk->icsk_af_ops->getsockopt(sk, level, optname,
                                                     optval, optlen);
        return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        if (level != SOL_TCP)
                return inet_csk_compat_getsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int len;

        if (!pskb_may_pull(skb, sizeof(*th)))
                goto out;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;
                int mss;

                if (unlikely(type &
                             ~(SKB_GSO_TCPV4 |
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               0) ||
                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
                        goto out;

                mss = skb_shinfo(skb)->gso_size;
                skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;

                segs = NULL;
                goto out;
        }

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        len = skb_shinfo(skb)->gso_size;
        delta = htonl(oldlen + (thlen + len));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        do {
                th->fin = th->psh = 0;

                th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                       (__force u32)delta));
                if (skb->ip_summed != CHECKSUM_PARTIAL)
                        th->check =
                             csum_fold(csum_partial(skb_transport_header(skb),
                                                    thlen, skb->csum));

                seq += len;
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        } while (skb->next);

        delta = htonl(oldlen + (skb->tail - skb->transport_header) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                (__force u32)delta));
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                th->check = csum_fold(csum_partial(skb_transport_header(skb),
                                                   thlen, skb->csum));

out:
        return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
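
/*
 * Illustration, userspace rather than kernel code: the ~csum_fold(check +
 * delta) fixups above are the standard incremental update of a ones'
 * complement checksum (RFC 1624): when a 16-bit field changes from m to
 * m', the new checksum is ~(~old + ~m + m'). A self-contained sketch of
 * that identity; the helper names are invented for the example.
 */
#include <stdint.h>

/* Fold a 32-bit ones' complement sum down to 16 bits. */
static uint16_t csum_fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* RFC 1624: recompute a checksum after one 16-bit field changes. */
static uint16_t csum_update(uint16_t old_check, uint16_t old_field,
                            uint16_t new_field)
{
        uint32_t sum = (uint16_t)~old_check;

        sum += (uint16_t)~old_field;
        sum += new_field;
        return (uint16_t)~csum_fold16(sum);
}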

#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
                if (p) {
                        if (p->md5_desc.tfm)
                                crypto_free_hash(p->md5_desc.tfm);
                        kfree(p);
                        p = NULL;
                }
        }
        free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
        struct tcp_md5sig_pool **pool = NULL;

        spin_lock_bh(&tcp_md5sig_pool_lock);
        if (--tcp_md5sig_users == 0) {
                pool = tcp_md5sig_pool;
                tcp_md5sig_pool = NULL;
        }
        spin_unlock_bh(&tcp_md5sig_pool_lock);
        if (pool)
                __tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);

static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
        int cpu;
        struct tcp_md5sig_pool **pool;

        pool = alloc_percpu(struct tcp_md5sig_pool *);
        if (!pool)
                return NULL;

        for_each_possible_cpu(cpu) {
                struct tcp_md5sig_pool *p;
                struct crypto_hash *hash;

                p = kzalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
                        goto out_free;
                *per_cpu_ptr(pool, cpu) = p;

                hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
                if (!hash || IS_ERR(hash))
                        goto out_free;

                p->md5_desc.tfm = hash;
        }
        return pool;
out_free:
        __tcp_free_md5sig_pool(pool);
        return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
        struct tcp_md5sig_pool **pool;
        int alloc = 0;

retry:
        spin_lock_bh(&tcp_md5sig_pool_lock);
        pool = tcp_md5sig_pool;
        if (tcp_md5sig_users++ == 0) {
                alloc = 1;
                spin_unlock_bh(&tcp_md5sig_pool_lock);
        } else if (!pool) {
                tcp_md5sig_users--;
                spin_unlock_bh(&tcp_md5sig_pool_lock);
                cpu_relax();
                goto retry;
        } else
                spin_unlock_bh(&tcp_md5sig_pool_lock);

        if (alloc) {
                /* we cannot hold spinlock here because this may sleep. */
                struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
                spin_lock_bh(&tcp_md5sig_pool_lock);
                if (!p) {
                        tcp_md5sig_users--;
                        spin_unlock_bh(&tcp_md5sig_pool_lock);
                        return NULL;
                }
                pool = tcp_md5sig_pool;
                if (pool) {
                        /* oops, it has already been assigned. */
                        spin_unlock_bh(&tcp_md5sig_pool_lock);
                        __tcp_free_md5sig_pool(p);
                } else {
                        tcp_md5sig_pool = pool = p;
                        spin_unlock_bh(&tcp_md5sig_pool_lock);
                }
        }
        return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
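
/*
 * Illustration, userspace rather than kernel code: the shape of
 * tcp_alloc_md5sig_pool() above - bump the user count under the lock,
 * drop the lock for the sleeping allocation, then retake it and recheck
 * whether a racing caller installed the pool first. A simplified pthread
 * sketch (it frees the loser's allocation instead of spinning); all
 * names here are invented for the example.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static void *the_pool;
static unsigned long pool_users;

static void *pool_get(size_t size)
{
        void *p;

        pthread_mutex_lock(&pool_lock);
        pool_users++;
        p = the_pool;
        pthread_mutex_unlock(&pool_lock);
        if (p)
                return p;

        p = malloc(size);       /* may block: must not hold the lock */

        pthread_mutex_lock(&pool_lock);
        if (!p) {
                pool_users--;   /* allocation failed, drop our ref */
        } else if (the_pool) {
                free(p);        /* lost the race, another caller won */
        } else {
                the_pool = p;
        }
        p = the_pool;
        pthread_mutex_unlock(&pool_lock);
        return p;
}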

struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
        struct tcp_md5sig_pool **p;
        spin_lock_bh(&tcp_md5sig_pool_lock);
        p = tcp_md5sig_pool;
        if (p)
                tcp_md5sig_users++;
        spin_unlock_bh(&tcp_md5sig_pool_lock);
        return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
        tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif

void tcp_done(struct sock *sk)
{
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
                TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);

        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
        if (!str)
                return 0;
        thash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("thash_entries=", set_thash_entries);
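
/*
 * Usage note (illustrative): the __setup() hook above makes the
 * established-hash size tunable from the kernel command line, e.g.
 * booting with
 *
 *      thash_entries=131072
 *
 * (the value is only an example). Left at the default of 0,
 * alloc_large_system_hash() sizes the table from available memory.
 */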

void __init tcp_init(void)
{
        struct sk_buff *skb = NULL;
        unsigned long limit;
        int order, i, max_share;

        if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
                __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
                                           sizeof(skb->cb));

        tcp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("tcp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        /* Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        tcp_hashinfo.ehash =
                alloc_large_system_hash("TCP established",
                                        sizeof(struct inet_ehash_bucket),
                                        thash_entries,
                                        (num_physpages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
                                        &tcp_hashinfo.ehash_size,
                                        NULL,
                                        0);
        tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
        for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
                rwlock_init(&tcp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
                INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
        }

        tcp_hashinfo.bhash =
                alloc_large_system_hash("TCP bind",
                                        sizeof(struct inet_bind_hashbucket),
                                        tcp_hashinfo.ehash_size,
                                        (num_physpages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
                                        &tcp_hashinfo.bhash_size,
                                        NULL,
                                        64 * 1024);
        tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
        for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
                spin_lock_init(&tcp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
        }

        /* Try to be a bit smarter and adjust defaults depending
         * on available memory.
         */
        for (order = 0; ((1 << order) << PAGE_SHIFT) <
             (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
             order++)
                ;
        if (order >= 4) {
                tcp_death_row.sysctl_max_tw_buckets = 180000;
                sysctl_tcp_max_orphans = 4096 << (order - 4);
                sysctl_max_syn_backlog = 1024;
        } else if (order < 3) {
                tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
                sysctl_tcp_max_orphans >>= (3 - order);
                sysctl_max_syn_backlog = 128;
        }

        /* Set the pressure threshold to be a fraction of global memory
         * that is up to 1/2 at 256 MB, decreasing toward zero as memory
         * shrinks, with a floor of 128 pages.
         */
        limit = min(nr_all_pages, 1UL << (28 - PAGE_SHIFT)) >> (20 - PAGE_SHIFT);
        limit = (limit * (nr_all_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
        limit = max(limit, 128UL);
        sysctl_tcp_mem[0] = limit / 4 * 3;
        sysctl_tcp_mem[1] = limit;
        sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
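
        /* Worked example (illustrative, assuming 4 KB pages, i.e.
         * PAGE_SHIFT == 12, on a 1 GiB machine): nr_all_pages == 262144,
         * so limit = min(262144, 65536) >> 8 == 256, then
         * (256 * (262144 >> 8)) >> 1 == 131072 pages (512 MiB), giving
         * sysctl_tcp_mem = { 98304, 131072, 196608 } pages.
         */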

        /* Set per-socket limits to no more than 1/128 of the pressure
         * threshold.
         */
        limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
        max_share = min(4UL * 1024 * 1024, limit);

        sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
        sysctl_tcp_wmem[1] = 16 * 1024;
        sysctl_tcp_wmem[2] = max(64 * 1024, max_share);

        sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
        sysctl_tcp_rmem[1] = 87380;
        sysctl_tcp_rmem[2] = max(87380, max_share);

        printk(KERN_INFO "TCP: Hash tables configured "
               "(established %d bind %d)\n",
               tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

        tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);