/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
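
/*
 * For example, an active close moves through ESTABLISHED -> FIN_WAIT1 ->
 * FIN_WAIT2 -> TIME_WAIT, while the passive side moves through
 * ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE; the new_state[] table
 * used by tcp_close_state() below encodes these close-direction shifts.
 */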

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All of sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWRNORM flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write() in
	 * state CLOSE_WAIT impossible. One solution is evident --- to set
	 * POLLHUP if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look at how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

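/* Usage note (illustrative, userspace): tcp_poll() is what services
 * poll()/select() on a TCP socket. A minimal caller sketch, where
 * sockfd is an assumed connected socket:
 *
 *	struct pollfd pfd = { .fd = sockfd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLHUP))
 *		printf("peer hung up in both directions\n");
 */
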
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -= tcp_hdr((struct sk_buff *)
						sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}

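/* Usage note (illustrative, userspace): SIOCINQ reports bytes readable
 * from the receive queue, SIOCOUTQ bytes queued but not yet acknowledged
 * by the peer. sockfd is an assumed connected socket, error handling
 * omitted:
 *
 *	int inq, outq;
 *	ioctl(sockfd, SIOCINQ, &inq);
 *	ioctl(sockfd, SIOCOUTQ, &outq);
 */
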
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

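/* forced_push() is true once more than half of the largest window the
 * peer has ever advertised has been queued since we last marked PSH:
 * at that point we stop coalescing and push out what has been queued.
 */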
static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_charge_skb(sk, skb);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			 unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;

	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}

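/* Usage note (illustrative, userspace): this is the receive half of the
 * splice(2) zero-copy path. A minimal sketch that moves socket data
 * through a pipe to a file, with sockfd/filefd assumed and error
 * handling omitted:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(sockfd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, filefd, NULL, n, SPLICE_F_MOVE);
 */
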
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
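
/* Usage note (illustrative, userspace): tcp_sendpage() backs zero-copy
 * transmit paths such as sendfile(2); when the route lacks SG or
 * checksum offload it falls back to the copying sock_no_sendpage()
 * above. Sketch, with sockfd/filefd/file_size assumed:
 *
 *	off_t off = 0;
 *	sendfile(sockfd, filefd, &off, file_size);
 */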

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					    !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			/* used must be signed: recv_actor() may return a
			 * negative errno, which a size_t would never
			 * compare below zero. */
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}

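/* Note on the recv_actor contract: the callback may consume up to @len
 * bytes starting at @offset in @skb and returns the number it actually
 * used, or a negative errno to abort the walk. tcp_splice_data_recv()
 * above is one such actor; it feeds the bytes into a pipe through
 * skb_splice_bits().
 */
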
/*
 * This routine copies from a sock struct into the user buffer.
 *
 * Technical note: in 2.3 we work on _locked_ socket, so that
 * tricks with *seq access order and skb->users are not required.
 * Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;	/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    __get_cpu_var(softnet_data).net_dma) {
			preempt_enable_no_resched();
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		} else {
			preempt_enable_no_resched();
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}

1631/*
1632 * State processing on a close. This implements the state shift for
1633 * sending our FIN frame. Note that we only send a FIN for some
1634 * states. A shutdown() may have already sent the FIN, or we may be
1635 * closed.
1636 */
1637
Arjan van de Ven9b5b5cf2005-11-29 16:21:38 -08001638static const unsigned char new_state[16] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 /* current state: new state: action: */
1640 /* (Invalid) */ TCP_CLOSE,
1641 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1642 /* TCP_SYN_SENT */ TCP_CLOSE,
1643 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1644 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1645 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1646 /* TCP_TIME_WAIT */ TCP_CLOSE,
1647 /* TCP_CLOSE */ TCP_CLOSE,
1648 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1649 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1650 /* TCP_LISTEN */ TCP_CLOSE,
1651 /* TCP_CLOSING */ TCP_CLOSING,
1652};
1653
1654static int tcp_close_state(struct sock *sk)
1655{
1656 int next = (int)new_state[sk->sk_state];
1657 int ns = next & TCP_STATE_MASK;
1658
1659 tcp_set_state(sk, ns);
1660
1661 return next & TCP_ACTION_FIN;
1662}
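/* Worked example (added commentary, not from the original source): a close()
 * in TCP_ESTABLISHED looks up new_state[TCP_ESTABLISHED], which is
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, so tcp_close_state() moves the socket to
 * FIN_WAIT1 and returns non-zero, telling the caller to send a FIN. A close()
 * in TCP_SYN_SENT maps to plain TCP_CLOSE with no TCP_ACTION_FIN bit, so no
 * FIN is sent.
 */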
1663
1664/*
1665 * Shutdown the sending side of a connection. Much like close except
 1666	 * that we don't shut down the receiving side or set sock_flag(sk, SOCK_DEAD).
1667 */
1668
1669void tcp_shutdown(struct sock *sk, int how)
1670{
1671 /* We need to grab some memory, and put together a FIN,
1672 * and then put it into the queue to be sent.
1673 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1674 */
1675 if (!(how & SEND_SHUTDOWN))
1676 return;
1677
1678 /* If we've already sent a FIN, or it's a closed state, skip this. */
1679 if ((1 << sk->sk_state) &
1680 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1681 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1682 /* Clear out any half completed packets. FIN if needed. */
1683 if (tcp_close_state(sk))
1684 tcp_send_fin(sk);
1685 }
1686}
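/* Usage sketch (added commentary; the remapping described here happens in the
 * generic socket layer, not in this file): by the time tcp_shutdown() runs,
 * the userspace SHUT_RD/SHUT_WR/SHUT_RDWR argument has been translated into
 * the RCV_SHUTDOWN/SEND_SHUTDOWN bit mask, so a plain half-close
 *
 *	shutdown(fd, SHUT_WR);
 *
 * arrives here with SEND_SHUTDOWN set, stops only the sending side, and takes
 * the FIN path above while reads continue to work.
 */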
1687
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688void tcp_close(struct sock *sk, long timeout)
1689{
1690 struct sk_buff *skb;
1691 int data_was_unread = 0;
Herbert Xu75c2d9072006-05-03 23:31:35 -07001692 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
1694 lock_sock(sk);
1695 sk->sk_shutdown = SHUTDOWN_MASK;
1696
1697 if (sk->sk_state == TCP_LISTEN) {
1698 tcp_set_state(sk, TCP_CLOSE);
1699
1700 /* Special case. */
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001701 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702
1703 goto adjudge_to_death;
1704 }
1705
1706 /* We need to flush the recv. buffs. We do this only on the
1707 * descriptor close, not protocol-sourced closes, because the
1708 * reader process may not have drained the data yet!
1709 */
1710 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1711 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001712 tcp_hdr(skb)->fin;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 data_was_unread += len;
1714 __kfree_skb(skb);
1715 }
1716
1717 sk_stream_mem_reclaim(sk);
1718
Gerrit Renker65bb7232007-04-28 21:21:46 -07001719 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1720 * data was lost. To witness the awful effects of the old behavior of
1721 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1722 * GET in an FTP client, suspend the process, wait for the client to
1723 * advertise a zero window, then kill -9 the FTP client, wheee...
1724 * Note: timeout is always zero in such a case.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 */
1726 if (data_was_unread) {
1727 /* Unread data was tossed, zap the connection. */
1728 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1729 tcp_set_state(sk, TCP_CLOSE);
1730 tcp_send_active_reset(sk, GFP_KERNEL);
1731 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1732 /* Check zero linger _after_ checking for unread data. */
1733 sk->sk_prot->disconnect(sk, 0);
1734 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1735 } else if (tcp_close_state(sk)) {
1736 /* We FIN if the application ate all the data before
1737 * zapping the connection.
1738 */
1739
 1740		/* RED-PEN. Formally speaking, we have broken the TCP state
 1741		 * machine. State transitions:
1742 *
1743 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1744 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1745 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1746 *
1747 * are legal only when FIN has been sent (i.e. in window),
1748 * rather than queued out of window. Purists blame.
1749 *
1750 * F.e. "RFC state" is ESTABLISHED,
1751 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1752 *
1753 * The visible declinations are that sometimes
1754 * we enter time-wait state, when it is not required really
1755 * (harmless), do not send active resets, when they are
1756 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1757 * they look as CLOSING or LAST_ACK for Linux)
1758 * Probably, I missed some more holelets.
1759 * --ANK
1760 */
1761 tcp_send_fin(sk);
1762 }
1763
1764 sk_stream_wait_close(sk, timeout);
1765
1766adjudge_to_death:
Herbert Xu75c2d9072006-05-03 23:31:35 -07001767 state = sk->sk_state;
1768 sock_hold(sk);
1769 sock_orphan(sk);
1770 atomic_inc(sk->sk_prot->orphan_count);
1771
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 /* It is the last release_sock in its life. It will remove backlog. */
1773 release_sock(sk);
1774
1775
1776 /* Now socket is owned by kernel and we acquire BH lock
1777 to finish close. No need to check for user refs.
1778 */
1779 local_bh_disable();
1780 bh_lock_sock(sk);
1781 BUG_TRAP(!sock_owned_by_user(sk));
1782
Herbert Xu75c2d9072006-05-03 23:31:35 -07001783 /* Have we already been destroyed by a softirq or backlog? */
1784 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1785 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
1787 /* This is a (useful) BSD violating of the RFC. There is a
1788 * problem with TCP as specified in that the other end could
1789 * keep a socket open forever with no application left this end.
1790 * We use a 3 minute timeout (about the same as BSD) then kill
1791 * our end. If they send after that then tough - BUT: long enough
1792 * that we won't make the old 4*rto = almost no time - whoops
1793 * reset mistake.
1794 *
1795 * Nope, it was not mistake. It is really desired behaviour
1796 * f.e. on http servers, when such sockets are useless, but
1797 * consume significant resources. Let's do it with special
1798 * linger2 option. --ANK
1799 */
1800
1801 if (sk->sk_state == TCP_FIN_WAIT2) {
1802 struct tcp_sock *tp = tcp_sk(sk);
1803 if (tp->linger2 < 0) {
1804 tcp_set_state(sk, TCP_CLOSE);
1805 tcp_send_active_reset(sk, GFP_ATOMIC);
1806 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1807 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001808 const int tmo = tcp_fin_time(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
1810 if (tmo > TCP_TIMEWAIT_LEN) {
David S. Miller52499af2006-07-31 22:32:09 -07001811 inet_csk_reset_keepalive_timer(sk,
1812 tmo - TCP_TIMEWAIT_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1815 goto out;
1816 }
1817 }
1818 }
1819 if (sk->sk_state != TCP_CLOSE) {
1820 sk_stream_mem_reclaim(sk);
Pavel Emelianove4fd5da2007-05-29 13:19:18 -07001821 if (tcp_too_many_orphans(sk,
1822 atomic_read(sk->sk_prot->orphan_count))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 if (net_ratelimit())
 1824				printk(KERN_INFO "TCP: too many orphaned "
 1825				       "sockets\n");
1826 tcp_set_state(sk, TCP_CLOSE);
1827 tcp_send_active_reset(sk, GFP_ATOMIC);
1828 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1829 }
1830 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
1832 if (sk->sk_state == TCP_CLOSE)
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001833 inet_csk_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 /* Otherwise, socket is reprieved until protocol close. */
1835
1836out:
1837 bh_unlock_sock(sk);
1838 local_bh_enable();
1839 sock_put(sk);
1840}
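/* Worked example for the FIN_WAIT2 handling above (added commentary,
 * illustrative numbers): with the default tcp_fin_timeout of 60 s, tmo is not
 * greater than TCP_TIMEWAIT_LEN (also 60 s), so the full socket is replaced
 * by a lightweight timewait bucket via tcp_time_wait(). Only a linger2 value
 * large enough to push tmo past TCP_TIMEWAIT_LEN keeps the full socket
 * around, with the keepalive timer armed for the difference.
 */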
1841
1842/* These states need RST on ABORT according to RFC793 */
1843
1844static inline int tcp_need_reset(int state)
1845{
1846 return (1 << state) &
1847 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1848 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1849}
1850
1851int tcp_disconnect(struct sock *sk, int flags)
1852{
1853 struct inet_sock *inet = inet_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001854 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 struct tcp_sock *tp = tcp_sk(sk);
1856 int err = 0;
1857 int old_state = sk->sk_state;
1858
1859 if (old_state != TCP_CLOSE)
1860 tcp_set_state(sk, TCP_CLOSE);
1861
1862 /* ABORT function of RFC793 */
1863 if (old_state == TCP_LISTEN) {
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001864 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 } else if (tcp_need_reset(old_state) ||
1866 (tp->snd_nxt != tp->write_seq &&
1867 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001868		/* The last check adjusts for the discrepancy between Linux
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869		 * and the RFC states.
 1870		 */
1871 tcp_send_active_reset(sk, gfp_any());
1872 sk->sk_err = ECONNRESET;
1873 } else if (old_state == TCP_SYN_SENT)
1874 sk->sk_err = ECONNRESET;
1875
1876 tcp_clear_xmit_timers(sk);
1877 __skb_queue_purge(&sk->sk_receive_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001878 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 __skb_queue_purge(&tp->out_of_order_queue);
Chris Leech1a2449a2006-05-23 18:05:53 -07001880#ifdef CONFIG_NET_DMA
1881 __skb_queue_purge(&sk->sk_async_wait_queue);
1882#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883
1884 inet->dport = 0;
1885
1886 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1887 inet_reset_saddr(sk);
1888
1889 sk->sk_shutdown = 0;
1890 sock_reset_flag(sk, SOCK_DONE);
1891 tp->srtt = 0;
1892 if ((tp->write_seq += tp->max_window + 2) == 0)
1893 tp->write_seq = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001894 icsk->icsk_backoff = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 tp->snd_cwnd = 2;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001896 icsk->icsk_probes_out = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 tp->packets_out = 0;
1898 tp->snd_ssthresh = 0x7fffffff;
1899 tp->snd_cwnd_cnt = 0;
Stephen Hemminger9772efb2005-11-10 17:09:53 -08001900 tp->bytes_acked = 0;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001901 tcp_set_ca_state(sk, TCP_CA_Open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 tcp_clear_retrans(tp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001903 inet_csk_delack_init(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001904 tcp_init_send_head(sk);
Srinivas Ajib40b4f72007-05-03 17:32:28 -07001905 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 __sk_dst_reset(sk);
1907
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001908 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
1910 sk->sk_error_report(sk);
1911 return err;
1912}
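/* Illustrative caller (added commentary): the usual way userspace reaches
 * tcp_disconnect() is a connect() with AF_UNSPEC, which the socket layer
 * routes to sk->sk_prot->disconnect():
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));
 *
 * which aborts the connection and returns the socket to TCP_CLOSE so it can
 * be reused.
 */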
1913
1914/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 * Socket option code for TCP.
1916 */
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001917static int do_tcp_setsockopt(struct sock *sk, int level,
1918 int optname, char __user *optval, int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919{
1920 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001921 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 int val;
1923 int err = 0;
1924
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001925	/* This is a string value; all the others are ints. */
1926 if (optname == TCP_CONGESTION) {
1927 char name[TCP_CA_NAME_MAX];
1928
1929 if (optlen < 1)
1930 return -EINVAL;
1931
1932 val = strncpy_from_user(name, optval,
1933 min(TCP_CA_NAME_MAX-1, optlen));
1934 if (val < 0)
1935 return -EFAULT;
1936 name[val] = 0;
1937
1938 lock_sock(sk);
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001939 err = tcp_set_congestion_control(sk, name);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001940 release_sock(sk);
1941 return err;
1942 }
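	/* Usage sketch from userspace (added commentary, illustrative only):
	 *
	 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", 5);
	 *
	 * The name is NUL-terminated above and handed to
	 * tcp_set_congestion_control(), which fails if no such algorithm is
	 * registered (or can be loaded).
	 */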
1943
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 if (optlen < sizeof(int))
1945 return -EINVAL;
1946
1947 if (get_user(val, (int __user *)optval))
1948 return -EFAULT;
1949
1950 lock_sock(sk);
1951
1952 switch (optname) {
1953 case TCP_MAXSEG:
1954 /* Values greater than interface MTU won't take effect. However
1955 * at the point when this call is done we typically don't yet
 1956		 * know which interface is going to be used. */
1957 if (val < 8 || val > MAX_TCP_WINDOW) {
1958 err = -EINVAL;
1959 break;
1960 }
1961 tp->rx_opt.user_mss = val;
1962 break;
1963
1964 case TCP_NODELAY:
1965 if (val) {
1966 /* TCP_NODELAY is weaker than TCP_CORK, so that
1967 * this option on corked socket is remembered, but
1968 * it is not activated until cork is cleared.
1969 *
1970 * However, when TCP_NODELAY is set we make
1971 * an explicit push, which overrides even TCP_CORK
1972 * for currently queued segments.
1973 */
1974 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001975 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 } else {
1977 tp->nonagle &= ~TCP_NAGLE_OFF;
1978 }
1979 break;
1980
1981 case TCP_CORK:
1982 /* When set indicates to always queue non-full frames.
1983 * Later the user clears this option and we transmit
1984 * any pending partial frames in the queue. This is
1985 * meant to be used alongside sendfile() to get properly
1986 * filled frames when the user (for example) must write
1987 * out headers with a write() call first and then use
1988 * sendfile to send out the data parts.
1989 *
1990 * TCP_CORK can be set together with TCP_NODELAY and it is
1991 * stronger than TCP_NODELAY.
1992 */
1993 if (val) {
1994 tp->nonagle |= TCP_NAGLE_CORK;
1995 } else {
1996 tp->nonagle &= ~TCP_NAGLE_CORK;
1997 if (tp->nonagle&TCP_NAGLE_OFF)
1998 tp->nonagle |= TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001999 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 }
2001 break;
2002
2003 case TCP_KEEPIDLE:
2004 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2005 err = -EINVAL;
2006 else {
2007 tp->keepalive_time = val * HZ;
2008 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2009 !((1 << sk->sk_state) &
2010 (TCPF_CLOSE | TCPF_LISTEN))) {
2011 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2012 if (tp->keepalive_time > elapsed)
2013 elapsed = tp->keepalive_time - elapsed;
2014 else
2015 elapsed = 0;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002016 inet_csk_reset_keepalive_timer(sk, elapsed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 }
2018 }
2019 break;
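		/* Worked example for the TCP_KEEPIDLE arithmetic above (added
		 * commentary): with keepalive enabled and a connection that
		 * has been idle for 30 s, setting val = 120 gives
		 * elapsed = 120*HZ - 30*HZ, so the next probe is re-armed
		 * 90 seconds out instead of a full keepalive_time.
		 */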
2020 case TCP_KEEPINTVL:
2021 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2022 err = -EINVAL;
2023 else
2024 tp->keepalive_intvl = val * HZ;
2025 break;
2026 case TCP_KEEPCNT:
2027 if (val < 1 || val > MAX_TCP_KEEPCNT)
2028 err = -EINVAL;
2029 else
2030 tp->keepalive_probes = val;
2031 break;
2032 case TCP_SYNCNT:
2033 if (val < 1 || val > MAX_TCP_SYNCNT)
2034 err = -EINVAL;
2035 else
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002036 icsk->icsk_syn_retries = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 break;
2038
2039 case TCP_LINGER2:
2040 if (val < 0)
2041 tp->linger2 = -1;
2042 else if (val > sysctl_tcp_fin_timeout / HZ)
2043 tp->linger2 = 0;
2044 else
2045 tp->linger2 = val * HZ;
2046 break;
2047
2048 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002049 icsk->icsk_accept_queue.rskq_defer_accept = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 if (val > 0) {
2051 /* Translate value in seconds to number of
2052 * retransmits */
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002053 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 val > ((TCP_TIMEOUT_INIT / HZ) <<
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002055 icsk->icsk_accept_queue.rskq_defer_accept))
2056 icsk->icsk_accept_queue.rskq_defer_accept++;
2057 icsk->icsk_accept_queue.rskq_defer_accept++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 }
2059 break;
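		/* Worked example for the seconds-to-retransmits translation
		 * above (added commentary, assuming TCP_TIMEOUT_INIT/HZ == 3):
		 * the thresholds grow as 3, 6, 12, 24, ... so val = 10 passes
		 * 3 and 6 but not 12; the loop stops at 2 and the trailing
		 * increment stores rskq_defer_accept = 3.
		 */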
2060
2061 case TCP_WINDOW_CLAMP:
2062 if (!val) {
2063 if (sk->sk_state != TCP_CLOSE) {
2064 err = -EINVAL;
2065 break;
2066 }
2067 tp->window_clamp = 0;
2068 } else
2069 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2070 SOCK_MIN_RCVBUF / 2 : val;
2071 break;
2072
2073 case TCP_QUICKACK:
2074 if (!val) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002075 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002077 icsk->icsk_ack.pingpong = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 if ((1 << sk->sk_state) &
2079 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002080 inet_csk_ack_scheduled(sk)) {
2081 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
Chris Leech0e4b4992006-05-23 18:00:16 -07002082 tcp_cleanup_rbuf(sk, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 if (!(val & 1))
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002084 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 }
2086 }
2087 break;
2088
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002089#ifdef CONFIG_TCP_MD5SIG
2090 case TCP_MD5SIG:
2091 /* Read the IP->Key mappings from userspace */
2092 err = tp->af_specific->md5_parse(sk, optval, optlen);
2093 break;
2094#endif
2095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 default:
2097 err = -ENOPROTOOPT;
2098 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002099 }
2100
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 release_sock(sk);
2102 return err;
2103}
2104
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002105int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2106 int optlen)
2107{
2108 struct inet_connection_sock *icsk = inet_csk(sk);
2109
2110 if (level != SOL_TCP)
2111 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2112 optval, optlen);
2113 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2114}
2115
2116#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002117int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2118 char __user *optval, int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002119{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002120 if (level != SOL_TCP)
2121 return inet_csk_compat_setsockopt(sk, level, optname,
2122 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002123 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2124}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002125
2126EXPORT_SYMBOL(compat_tcp_setsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002127#endif
2128
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129/* Return information about state of tcp endpoint in API format. */
2130void tcp_get_info(struct sock *sk, struct tcp_info *info)
2131{
2132 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002133 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 u32 now = tcp_time_stamp;
2135
2136 memset(info, 0, sizeof(*info));
2137
2138 info->tcpi_state = sk->sk_state;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002139 info->tcpi_ca_state = icsk->icsk_ca_state;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002140 info->tcpi_retransmits = icsk->icsk_retransmits;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002141 info->tcpi_probes = icsk->icsk_probes_out;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002142 info->tcpi_backoff = icsk->icsk_backoff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
2144 if (tp->rx_opt.tstamp_ok)
2145 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
Ilpo Järvinene60402d2007-08-09 15:14:46 +03002146 if (tcp_is_sack(tp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 info->tcpi_options |= TCPI_OPT_SACK;
2148 if (tp->rx_opt.wscale_ok) {
2149 info->tcpi_options |= TCPI_OPT_WSCALE;
2150 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2151 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153
2154 if (tp->ecn_flags&TCP_ECN_OK)
2155 info->tcpi_options |= TCPI_OPT_ECN;
2156
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002157 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2158 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002159 info->tcpi_snd_mss = tp->mss_cache;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002160 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
Rick Jones5ee3afb2007-09-18 13:26:31 -07002162 if (sk->sk_state == TCP_LISTEN) {
2163 info->tcpi_unacked = sk->sk_ack_backlog;
2164 info->tcpi_sacked = sk->sk_max_ack_backlog;
2165 } else {
2166 info->tcpi_unacked = tp->packets_out;
2167 info->tcpi_sacked = tp->sacked_out;
2168 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 info->tcpi_lost = tp->lost_out;
2170 info->tcpi_retrans = tp->retrans_out;
2171 info->tcpi_fackets = tp->fackets_out;
2172
2173 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002174 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2176
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002177 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2179 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2180 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2181 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2182 info->tcpi_snd_cwnd = tp->snd_cwnd;
2183 info->tcpi_advmss = tp->advmss;
2184 info->tcpi_reordering = tp->reordering;
2185
2186 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2187 info->tcpi_rcv_space = tp->rcvq_space.space;
2188
2189 info->tcpi_total_retrans = tp->total_retrans;
2190}
2191
2192EXPORT_SYMBOL_GPL(tcp_get_info);
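/* Usage sketch from userspace (added commentary, illustrative only):
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", info.tcpi_rtt, info.tcpi_snd_cwnd);
 *
 * do_tcp_getsockopt() below clamps len, so callers should check the returned
 * length before reading fields near the end of the struct.
 */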
2193
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002194static int do_tcp_getsockopt(struct sock *sk, int level,
2195 int optname, char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002197 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 struct tcp_sock *tp = tcp_sk(sk);
2199 int val, len;
2200
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 if (get_user(len, optlen))
2202 return -EFAULT;
2203
2204 len = min_t(unsigned int, len, sizeof(int));
2205
2206 if (len < 0)
2207 return -EINVAL;
2208
2209 switch (optname) {
2210 case TCP_MAXSEG:
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002211 val = tp->mss_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2213 val = tp->rx_opt.user_mss;
2214 break;
2215 case TCP_NODELAY:
2216 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2217 break;
2218 case TCP_CORK:
2219 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2220 break;
2221 case TCP_KEEPIDLE:
2222 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2223 break;
2224 case TCP_KEEPINTVL:
2225 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2226 break;
2227 case TCP_KEEPCNT:
2228 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2229 break;
2230 case TCP_SYNCNT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002231 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 break;
2233 case TCP_LINGER2:
2234 val = tp->linger2;
2235 if (val >= 0)
2236 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2237 break;
2238 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002239 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2240 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 break;
2242 case TCP_WINDOW_CLAMP:
2243 val = tp->window_clamp;
2244 break;
2245 case TCP_INFO: {
2246 struct tcp_info info;
2247
2248 if (get_user(len, optlen))
2249 return -EFAULT;
2250
2251 tcp_get_info(sk, &info);
2252
2253 len = min_t(unsigned int, len, sizeof(info));
2254 if (put_user(len, optlen))
2255 return -EFAULT;
2256 if (copy_to_user(optval, &info, len))
2257 return -EFAULT;
2258 return 0;
2259 }
2260 case TCP_QUICKACK:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002261 val = !icsk->icsk_ack.pingpong;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 break;
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002263
2264 case TCP_CONGESTION:
2265 if (get_user(len, optlen))
2266 return -EFAULT;
2267 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2268 if (put_user(len, optlen))
2269 return -EFAULT;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002270 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002271 return -EFAULT;
2272 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 default:
2274 return -ENOPROTOOPT;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002275 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
2277 if (put_user(len, optlen))
2278 return -EFAULT;
2279 if (copy_to_user(optval, &val, len))
2280 return -EFAULT;
2281 return 0;
2282}
2283
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002284int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2285 int __user *optlen)
2286{
2287 struct inet_connection_sock *icsk = inet_csk(sk);
2288
2289 if (level != SOL_TCP)
2290 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2291 optval, optlen);
2292 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2293}
2294
2295#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002296int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2297 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002298{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002299 if (level != SOL_TCP)
2300 return inet_csk_compat_getsockopt(sk, level, optname,
2301 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002302 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2303}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002304
2305EXPORT_SYMBOL(compat_tcp_getsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002306#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
Herbert Xu576a30e2006-06-27 13:22:38 -07002308struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07002309{
2310 struct sk_buff *segs = ERR_PTR(-EINVAL);
2311 struct tcphdr *th;
 2312	unsigned int thlen;
2313 unsigned int seq;
Al Virod3bc23e2006-11-14 21:24:49 -08002314 __be32 delta;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002315 unsigned int oldlen;
2316 unsigned int len;
2317
2318 if (!pskb_may_pull(skb, sizeof(*th)))
2319 goto out;
2320
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002321 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002322 thlen = th->doff * 4;
2323 if (thlen < sizeof(*th))
2324 goto out;
2325
2326 if (!pskb_may_pull(skb, thlen))
2327 goto out;
2328
Herbert Xu0718bcc2006-06-25 23:55:46 -07002329 oldlen = (u16)~skb->len;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002330 __skb_pull(skb, thlen);
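	/* Note on the arithmetic above (added commentary): oldlen is the
	 * ones'-complement of the old length, so later adding it together
	 * with the new length into th->check performs an incremental
	 * checksum update in the style of RFC 1624, instead of recomputing
	 * the checksum from scratch for every segment.
	 */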
2331
Herbert Xu3820c3f2006-06-29 20:11:25 -07002332 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2333 /* Packet is from an untrusted source, reset gso_segs. */
Herbert Xubbcf4672006-07-03 19:38:35 -07002334 int type = skb_shinfo(skb)->gso_type;
2335 int mss;
Herbert Xu3820c3f2006-06-29 20:11:25 -07002336
Herbert Xubbcf4672006-07-03 19:38:35 -07002337 if (unlikely(type &
2338 ~(SKB_GSO_TCPV4 |
2339 SKB_GSO_DODGY |
2340 SKB_GSO_TCP_ECN |
2341 SKB_GSO_TCPV6 |
2342 0) ||
2343 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2344 goto out;
2345
2346 mss = skb_shinfo(skb)->gso_size;
Ilpo Järvinen172589c2007-08-28 15:50:33 -07002347 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
Herbert Xu3820c3f2006-06-29 20:11:25 -07002348
2349 segs = NULL;
2350 goto out;
2351 }
2352
Herbert Xu576a30e2006-06-27 13:22:38 -07002353 segs = skb_segment(skb, features);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002354 if (IS_ERR(segs))
2355 goto out;
2356
2357 len = skb_shinfo(skb)->gso_size;
Herbert Xu0718bcc2006-06-25 23:55:46 -07002358 delta = htonl(oldlen + (thlen + len));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002359
2360 skb = segs;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002361 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002362 seq = ntohl(th->seq);
2363
2364 do {
2365 th->fin = th->psh = 0;
2366
Al Virod3bc23e2006-11-14 21:24:49 -08002367 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2368 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002369 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002370 th->check =
2371 csum_fold(csum_partial(skb_transport_header(skb),
2372 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002373
2374 seq += len;
2375 skb = skb->next;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002376 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002377
2378 th->seq = htonl(seq);
2379 th->cwr = 0;
2380 } while (skb->next);
2381
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002382 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002383 skb->data_len);
Al Virod3bc23e2006-11-14 21:24:49 -08002384 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2385 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002386 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002387 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2388 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002389
2390out:
2391 return segs;
2392}
Herbert Xuadcfc7d2006-06-30 13:36:15 -07002393EXPORT_SYMBOL(tcp_tso_segment);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002394
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002395#ifdef CONFIG_TCP_MD5SIG
2396static unsigned long tcp_md5sig_users;
2397static struct tcp_md5sig_pool **tcp_md5sig_pool;
2398static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2399
2400static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2401{
2402 int cpu;
2403 for_each_possible_cpu(cpu) {
2404 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2405 if (p) {
2406 if (p->md5_desc.tfm)
2407 crypto_free_hash(p->md5_desc.tfm);
2408 kfree(p);
2409 p = NULL;
2410 }
2411 }
2412 free_percpu(pool);
2413}
2414
2415void tcp_free_md5sig_pool(void)
2416{
2417 struct tcp_md5sig_pool **pool = NULL;
2418
David S. Miller2c4f6212007-02-20 23:51:47 -08002419 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002420 if (--tcp_md5sig_users == 0) {
2421 pool = tcp_md5sig_pool;
2422 tcp_md5sig_pool = NULL;
2423 }
David S. Miller2c4f6212007-02-20 23:51:47 -08002424 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002425 if (pool)
2426 __tcp_free_md5sig_pool(pool);
2427}
2428
2429EXPORT_SYMBOL(tcp_free_md5sig_pool);
2430
Adrian Bunkf5b99bc2006-11-30 17:22:29 -08002431static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002432{
2433 int cpu;
2434 struct tcp_md5sig_pool **pool;
2435
2436 pool = alloc_percpu(struct tcp_md5sig_pool *);
2437 if (!pool)
2438 return NULL;
2439
2440 for_each_possible_cpu(cpu) {
2441 struct tcp_md5sig_pool *p;
2442 struct crypto_hash *hash;
2443
2444 p = kzalloc(sizeof(*p), GFP_KERNEL);
2445 if (!p)
2446 goto out_free;
2447 *per_cpu_ptr(pool, cpu) = p;
2448
2449 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2450 if (!hash || IS_ERR(hash))
2451 goto out_free;
2452
2453 p->md5_desc.tfm = hash;
2454 }
2455 return pool;
2456out_free:
2457 __tcp_free_md5sig_pool(pool);
2458 return NULL;
2459}
2460
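/* Note (added commentary): tcp_md5sig_users is a manual refcount protected by
 * tcp_md5sig_pool_lock. The lock is dropped around __tcp_alloc_md5sig_pool()
 * because crypto_alloc_hash() may sleep; the "already been assigned" branch
 * below resolves the race where two CPUs allocated concurrently and one of
 * the pools must be freed again.
 */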
2461struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2462{
2463 struct tcp_md5sig_pool **pool;
2464 int alloc = 0;
2465
2466retry:
David S. Miller2c4f6212007-02-20 23:51:47 -08002467 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002468 pool = tcp_md5sig_pool;
2469 if (tcp_md5sig_users++ == 0) {
2470 alloc = 1;
David S. Miller2c4f6212007-02-20 23:51:47 -08002471 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002472 } else if (!pool) {
2473 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002474 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002475 cpu_relax();
2476 goto retry;
2477 } else
David S. Miller2c4f6212007-02-20 23:51:47 -08002478 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002479
2480 if (alloc) {
2481 /* we cannot hold spinlock here because this may sleep. */
2482 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
David S. Miller2c4f6212007-02-20 23:51:47 -08002483 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002484 if (!p) {
2485 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002486 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002487 return NULL;
2488 }
2489 pool = tcp_md5sig_pool;
2490 if (pool) {
2491 /* oops, it has already been assigned. */
David S. Miller2c4f6212007-02-20 23:51:47 -08002492 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002493 __tcp_free_md5sig_pool(p);
2494 } else {
2495 tcp_md5sig_pool = pool = p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002496 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002497 }
2498 }
2499 return pool;
2500}
2501
2502EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2503
2504struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2505{
2506 struct tcp_md5sig_pool **p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002507 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002508 p = tcp_md5sig_pool;
2509 if (p)
2510 tcp_md5sig_users++;
David S. Miller2c4f6212007-02-20 23:51:47 -08002511 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002512 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2513}
2514
2515EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2516
David S. Miller6931ba72006-12-13 16:25:44 -08002517void __tcp_put_md5sig_pool(void)
2518{
2519 tcp_free_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002520}
2521
2522EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2523#endif
2524
Andi Kleen4ac02ba2007-04-20 17:11:46 -07002525void tcp_done(struct sock *sk)
2526{
 2527	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2528 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
2529
2530 tcp_set_state(sk, TCP_CLOSE);
2531 tcp_clear_xmit_timers(sk);
2532
2533 sk->sk_shutdown = SHUTDOWN_MASK;
2534
2535 if (!sock_flag(sk, SOCK_DEAD))
2536 sk->sk_state_change(sk);
2537 else
2538 inet_csk_destroy_sock(sk);
2539}
2540EXPORT_SYMBOL_GPL(tcp_done);
2541
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542extern void __skb_cb_too_small_for_tcp(int, int);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002543extern struct tcp_congestion_ops tcp_reno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544
2545static __initdata unsigned long thash_entries;
2546static int __init set_thash_entries(char *str)
2547{
2548 if (!str)
2549 return 0;
2550 thash_entries = simple_strtoul(str, &str, 0);
2551 return 1;
2552}
2553__setup("thash_entries=", set_thash_entries);
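/* Usage sketch (added commentary, illustrative value): the established-hash
 * size can be pinned on the kernel command line instead of being auto-sized
 * from available memory, e.g.:
 *
 *	thash_entries=131072
 */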
2554
2555void __init tcp_init(void)
2556{
2557 struct sk_buff *skb = NULL;
John Heffner7b4f4b52006-03-25 01:34:07 -08002558 unsigned long limit;
2559 int order, i, max_share;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
2561 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2562 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2563 sizeof(skb->cb));
2564
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002565 tcp_hashinfo.bind_bucket_cachep =
2566 kmem_cache_create("tcp_bind_bucket",
2567 sizeof(struct inet_bind_bucket), 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002568 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 /* Size and allocate the main established and bind bucket
2571 * hash tables.
2572 *
2573 * The methodology is similar to that of the buffer cache.
2574 */
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002575 tcp_hashinfo.ehash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 alloc_large_system_hash("TCP established",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002577 sizeof(struct inet_ehash_bucket),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 thash_entries,
2579 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002580 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002581 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002582 &tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 NULL,
Jean Delvare0ccfe612007-10-30 00:59:25 -07002584 thash_entries ? 0 : 512 * 1024);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002585 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2586 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002587 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002588 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 }
Eric Dumazet230140c2007-11-07 02:40:20 -08002590 if (inet_ehash_locks_alloc(&tcp_hashinfo))
2591 panic("TCP: failed to alloc ehash_locks");
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002592 tcp_hashinfo.bhash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 alloc_large_system_hash("TCP bind",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002594 sizeof(struct inet_bind_hashbucket),
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002595 tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002597 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002598 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002599 &tcp_hashinfo.bhash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 NULL,
2601 64 * 1024);
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002602 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2603 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2604 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2605 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 }
2607
2608 /* Try to be a bit smarter and adjust defaults depending
2609 * on available memory.
2610 */
2611 for (order = 0; ((1 << order) << PAGE_SHIFT) <
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002612 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 order++)
2614 ;
Andi Kleene7626482005-06-13 14:24:52 -07002615 if (order >= 4) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002616 tcp_death_row.sysctl_max_tw_buckets = 180000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 sysctl_tcp_max_orphans = 4096 << (order - 4);
2618 sysctl_max_syn_backlog = 1024;
2619 } else if (order < 3) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002620 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 sysctl_tcp_max_orphans >>= (3 - order);
2622 sysctl_max_syn_backlog = 128;
2623 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624
John Heffner53cdcc02007-03-16 15:04:03 -07002625	/* Set the pressure threshold to be a fraction of global memory that
 2626	 * reaches up to 1/2 at 256 MB and decreases toward zero as the
 2627	 * amount of memory shrinks, with a floor of 128 pages.
 2628	 */
2629 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2630 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2631 limit = max(limit, 128UL);
2632 sysctl_tcp_mem[0] = limit / 4 * 3;
2633 sysctl_tcp_mem[1] = limit;
John Heffner52bf3762006-11-14 20:25:17 -08002634 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
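	/* Worked example (added commentary, assuming 4 KB pages): with 256 MB
	 * of RAM, nr_all_pages == 65536, so
	 * limit = ((65536 >> 8) * (65536 >> 8)) >> 1 = 32768 pages (128 MB),
	 * the "1/2 at 256 MB" point mentioned above, and sysctl_tcp_mem
	 * becomes { 24576, 32768, 49152 } pages.
	 */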
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635
John Heffner53cdcc02007-03-16 15:04:03 -07002636 /* Set per-socket limits to no more than 1/128 the pressure threshold */
John Heffner7b4f4b52006-03-25 01:34:07 -08002637 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2638 max_share = min(4UL*1024*1024, limit);
2639
2640 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2641 sysctl_tcp_wmem[1] = 16*1024;
2642 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2643
2644 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2645 sysctl_tcp_rmem[1] = 87380;
2646 sysctl_tcp_rmem[2] = max(87380, max_share);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647
2648 printk(KERN_INFO "TCP: Hash tables configured "
2649 "(established %d bind %d)\n",
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002650 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07002651
2652 tcp_register_congestion_control(&tcp_reno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653}
2654
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655EXPORT_SYMBOL(tcp_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656EXPORT_SYMBOL(tcp_disconnect);
2657EXPORT_SYMBOL(tcp_getsockopt);
2658EXPORT_SYMBOL(tcp_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659EXPORT_SYMBOL(tcp_poll);
2660EXPORT_SYMBOL(tcp_read_sock);
2661EXPORT_SYMBOL(tcp_recvmsg);
2662EXPORT_SYMBOL(tcp_sendmsg);
Jens Axboe9c55e012007-11-06 23:30:13 -08002663EXPORT_SYMBOL(tcp_splice_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664EXPORT_SYMBOL(tcp_sendpage);
2665EXPORT_SYMBOL(tcp_setsockopt);
2666EXPORT_SYMBOL(tcp_shutdown);
2667EXPORT_SYMBOL(tcp_statistics);