/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
  "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
  "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
  "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
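
/*
 * Rough worked example of the defaults above (a sketch only; the exact
 * figure depends on sizeof(struct sk_buff), which varies by architecture
 * and config): assuming an sk_buff of roughly 240 bytes, _SK_MEM_OVERHEAD
 * is about 496 bytes, so SK_WMEM_MAX and SK_RMEM_MAX come to roughly
 * 496 * 256, i.e. around 124 KB of queued data per socket by default.
 */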

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
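
/*
 * Userspace sketch of what typically feeds this helper via SO_RCVTIMEO /
 * SO_SNDTIMEO (illustrative only; plain POSIX socket calls, not part of
 * the original file):
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * The timeval arrives here from sock_setsockopt() and is converted to
 * jiffies, so the example above stores roughly 2.5 * HZ in sk->sk_rcvtimeo.
 */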

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;
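		/*
		 * Illustrative sketch of the doubling above (not part of
		 * the original source): a userspace
		 *	int val = 65536;
		 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
		 * ends up, assuming val does not exceed sysctl_rmem_max, as
		 * sk->sk_rcvbuf = 131072, and that doubled value is what a
		 * later getsockopt(SO_RCVBUF) reports back.
		 */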

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;
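		/*
		 * Userspace sketch for the SO_TIMESTAMPING case above
		 * (illustrative only, using the flag names from
		 * <linux/net_tstamp.h>):
		 *
		 *	int val = SOF_TIMESTAMPING_RX_SOFTWARE |
		 *		  SOF_TIMESTAMPING_SOFTWARE;
		 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		 *		   &val, sizeof(val));
		 *
		 * This enables software receive timestamps and their
		 * reporting to the application via the SCM_TIMESTAMPING
		 * control message.
		 */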

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * caches using SLAB_DESTROY_BY_RCU should let
			 * sk_node.next un-modified. Special care is taken
			 * when initializing object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	}
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid = task_cls_classid(current);

	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

1090 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001091 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001092 * @family: protocol family
1093 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1094 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001096struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001097 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001099 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001101 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001103 sk->sk_family = family;
1104 /*
1105 * See comment in struct sock definition to understand
1106 * why we need sk_prot_creator -acme
1107 */
1108 sk->sk_prot = sk->sk_prot_creator = prot;
1109 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001110 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001111 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001112
1113 sock_update_classid(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001114 }
Frank Filza79af592005-09-27 15:23:38 -07001115
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001116 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117}
Eric Dumazet2a915252009-05-27 11:30:05 +00001118EXPORT_SYMBOL(sk_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119
Eric Dumazet2b85a342009-06-11 02:55:43 -07001120static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121{
1122 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123
1124 if (sk->sk_destruct)
1125 sk->sk_destruct(sk);
1126
Paul E. McKenneya898def2010-02-22 17:04:49 -08001127 filter = rcu_dereference_check(sk->sk_filter,
1128 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001130 sk_filter_uncharge(sk, filter);
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001131 rcu_assign_pointer(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 }
1133
Patrick Ohly20d49472009-02-12 05:03:38 +00001134 sock_disable_timestamp(sk, SOCK_TIMESTAMP);
1135 sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136
1137 if (atomic_read(&sk->sk_omem_alloc))
1138 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
Harvey Harrison0dc47872008-03-05 20:47:47 -08001139 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001141 if (sk->sk_peer_cred)
1142 put_cred(sk->sk_peer_cred);
1143 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001144 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001145 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001147
1148void sk_free(struct sock *sk)
1149{
1150 /*
1151 * We substract one from sk_wmem_alloc and can know if
1152 * some packets are still in some tx queue.
1153 * If not null, sock_wfree() will call __sk_free(sk) later
1154 */
1155 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1156 __sk_free(sk);
1157}
Eric Dumazet2a915252009-05-27 11:30:05 +00001158EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to the stopping
 * namespace is not an option.
 * Take a reference to a socket to remove it from hash _alive_ and after that
 * destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}
1304
1305/*
1306 * Simple resource managers for sockets.
1307 */
1308
1309
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001310/*
1311 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 */
1313void sock_wfree(struct sk_buff *skb)
1314{
1315 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001316 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
Eric Dumazetd99927f2009-09-24 10:49:24 +00001318 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1319 /*
 1320	 * Keep a reference on sk_wmem_alloc; it will be released
 1321	 * after the sk_write_space() call.
1322 */
1323 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001325 len = 1;
1326 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001327 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001328 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1329 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001330 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001331 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001332 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333}
Eric Dumazet2a915252009-05-27 11:30:05 +00001334EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001336/*
1337 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 */
1339void sock_rfree(struct sk_buff *skb)
1340{
1341 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001342 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Eric Dumazetd361fd52010-07-10 22:45:17 +00001344 atomic_sub(len, &sk->sk_rmem_alloc);
1345 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346}
Eric Dumazet2a915252009-05-27 11:30:05 +00001347EXPORT_SYMBOL(sock_rfree);
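/*
 * A hedged sketch of how these destructors are normally attached: protocols
 * do not call sock_wfree()/sock_rfree() directly, they use skb_set_owner_w()
 * and skb_set_owner_r(), which also charge skb->truesize to sk_wmem_alloc or
 * sk_rmem_alloc. example_charge_skbs() is hypothetical.
 */
static void example_charge_skbs(struct sock *sk, struct sk_buff *tx_skb,
				struct sk_buff *rx_skb)
{
	skb_set_owner_w(tx_skb, sk);	/* destructor = sock_wfree */
	skb_set_owner_r(rx_skb, sk);	/* destructor = sock_rfree */

	/* a later kfree_skb() on either buffer uncharges the socket again */
}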
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348
1349
1350int sock_i_uid(struct sock *sk)
1351{
1352 int uid;
1353
1354 read_lock(&sk->sk_callback_lock);
1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1356 read_unlock(&sk->sk_callback_lock);
1357 return uid;
1358}
Eric Dumazet2a915252009-05-27 11:30:05 +00001359EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
1361unsigned long sock_i_ino(struct sock *sk)
1362{
1363 unsigned long ino;
1364
1365 read_lock(&sk->sk_callback_lock);
1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1367 read_unlock(&sk->sk_callback_lock);
1368 return ino;
1369}
Eric Dumazet2a915252009-05-27 11:30:05 +00001370EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
1372/*
1373 * Allocate a skb from the socket's send buffer.
1374 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001375struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001376 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377{
1378 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001379 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 if (skb) {
1381 skb_set_owner_w(skb, sk);
1382 return skb;
1383 }
1384 }
1385 return NULL;
1386}
Eric Dumazet2a915252009-05-27 11:30:05 +00001387EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
1389/*
1390 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001391 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001392struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001393 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394{
1395 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1396 struct sk_buff *skb = alloc_skb(size, priority);
1397 if (skb) {
1398 skb_set_owner_r(skb, sk);
1399 return skb;
1400 }
1401 }
1402 return NULL;
1403}
1404
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001405/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001407 */
Al Virodd0fc662005-10-07 07:46:04 +01001408void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409{
1410 if ((unsigned)size <= sysctl_optmem_max &&
1411 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1412 void *mem;
1413 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001414 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 */
1416 atomic_add(size, &sk->sk_omem_alloc);
1417 mem = kmalloc(size, priority);
1418 if (mem)
1419 return mem;
1420 atomic_sub(size, &sk->sk_omem_alloc);
1421 }
1422 return NULL;
1423}
Eric Dumazet2a915252009-05-27 11:30:05 +00001424EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
1426/*
1427 * Free an option memory block.
1428 */
1429void sock_kfree_s(struct sock *sk, void *mem, int size)
1430{
1431 kfree(mem);
1432 atomic_sub(size, &sk->sk_omem_alloc);
1433}
Eric Dumazet2a915252009-05-27 11:30:05 +00001434EXPORT_SYMBOL(sock_kfree_s);
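/*
 * A small sketch, assuming a setsockopt-style handler that needs a temporary
 * buffer accounted against the socket's option memory (sk_omem_alloc).
 * example_copy_option() and its parameters are hypothetical.
 */
static int example_copy_option(struct sock *sk, char __user *optval, int optlen)
{
	void *buf = sock_kmalloc(sk, optlen, GFP_KERNEL);
	int err = 0;

	if (!buf)
		return -ENOBUFS;	/* over sysctl_optmem_max or no memory */
	if (copy_from_user(buf, optval, optlen))
		err = -EFAULT;
	/* ... consume buf ... */
	sock_kfree_s(sk, buf, optlen);	/* must pass back the same size */
	return err;
}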
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435
1436/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1437	   I think these locks should be removed for datagram sockets.
1438 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001439static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440{
1441 DEFINE_WAIT(wait);
1442
1443 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1444 for (;;) {
1445 if (!timeo)
1446 break;
1447 if (signal_pending(current))
1448 break;
1449 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001450 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1452 break;
1453 if (sk->sk_shutdown & SEND_SHUTDOWN)
1454 break;
1455 if (sk->sk_err)
1456 break;
1457 timeo = schedule_timeout(timeo);
1458 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001459 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 return timeo;
1461}
1462
1463
1464/*
1465 * Generic send/receive buffer handlers
1466 */
1467
Herbert Xu4cc7f682009-02-04 16:55:54 -08001468struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1469 unsigned long data_len, int noblock,
1470 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
1472 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001473 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 long timeo;
1475 int err;
1476
1477 gfp_mask = sk->sk_allocation;
1478 if (gfp_mask & __GFP_WAIT)
1479 gfp_mask |= __GFP_REPEAT;
1480
1481 timeo = sock_sndtimeo(sk, noblock);
1482 while (1) {
1483 err = sock_error(sk);
1484 if (err != 0)
1485 goto failure;
1486
1487 err = -EPIPE;
1488 if (sk->sk_shutdown & SEND_SHUTDOWN)
1489 goto failure;
1490
1491 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001492 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 if (skb) {
1494 int npages;
1495 int i;
1496
1497 /* No pages, we're done... */
1498 if (!data_len)
1499 break;
1500
1501 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1502 skb->truesize += data_len;
1503 skb_shinfo(skb)->nr_frags = npages;
1504 for (i = 0; i < npages; i++) {
1505 struct page *page;
1506 skb_frag_t *frag;
1507
1508 page = alloc_pages(sk->sk_allocation, 0);
1509 if (!page) {
1510 err = -ENOBUFS;
1511 skb_shinfo(skb)->nr_frags = i;
1512 kfree_skb(skb);
1513 goto failure;
1514 }
1515
1516 frag = &skb_shinfo(skb)->frags[i];
1517 frag->page = page;
1518 frag->page_offset = 0;
1519 frag->size = (data_len >= PAGE_SIZE ?
1520 PAGE_SIZE :
1521 data_len);
1522 data_len -= PAGE_SIZE;
1523 }
1524
1525 /* Full success... */
1526 break;
1527 }
1528 err = -ENOBUFS;
1529 goto failure;
1530 }
1531 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1532 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1533 err = -EAGAIN;
1534 if (!timeo)
1535 goto failure;
1536 if (signal_pending(current))
1537 goto interrupted;
1538 timeo = sock_wait_for_wmem(sk, timeo);
1539 }
1540
1541 skb_set_owner_w(skb, sk);
1542 return skb;
1543
1544interrupted:
1545 err = sock_intr_errno(timeo);
1546failure:
1547 *errcode = err;
1548 return NULL;
1549}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001550EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001552struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 int noblock, int *errcode)
1554{
1555 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1556}
Eric Dumazet2a915252009-05-27 11:30:05 +00001557EXPORT_SYMBOL(sock_alloc_send_skb);
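/*
 * A hedged sketch of the usual datagram sendmsg() pattern built on the
 * helper above; the 16-byte header reserve, example_sendmsg() and the
 * "hand to transmit path" step are placeholders, not a real protocol.
 */
static int example_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int noblock = msg->msg_flags & MSG_DONTWAIT;
	int err;

	skb = sock_alloc_send_skb(sk, len + 16, noblock, &err);
	if (!skb)
		return err;		/* -EAGAIN, -EPIPE, -EINTR, ... */

	skb_reserve(skb, 16);		/* room for a hypothetical header */
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		return -EFAULT;
	}
	/* a real protocol would hand skb to its transmit path here */
	kfree_skb(skb);
	return len;
}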
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
1559static void __lock_sock(struct sock *sk)
1560{
1561 DEFINE_WAIT(wait);
1562
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001563 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1565 TASK_UNINTERRUPTIBLE);
1566 spin_unlock_bh(&sk->sk_lock.slock);
1567 schedule();
1568 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001569 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 break;
1571 }
1572 finish_wait(&sk->sk_lock.wq, &wait);
1573}
1574
1575static void __release_sock(struct sock *sk)
1576{
1577 struct sk_buff *skb = sk->sk_backlog.head;
1578
1579 do {
1580 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1581 bh_unlock_sock(sk);
1582
1583 do {
1584 struct sk_buff *next = skb->next;
1585
Eric Dumazet7fee2262010-05-11 23:19:48 +00001586 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001588 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
1590 /*
1591 * We are in process context here with softirqs
1592 * disabled, use cond_resched_softirq() to preempt.
1593 * This is safe to do because we've taken the backlog
1594 * queue private:
1595 */
1596 cond_resched_softirq();
1597
1598 skb = next;
1599 } while (skb != NULL);
1600
1601 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001602 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001603
1604 /*
 1605	 * Doing the zeroing here guarantees we cannot loop forever
1606 * while a wild producer attempts to flood us.
1607 */
1608 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609}
1610
1611/**
1612 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001613 * @sk: sock to wait on
1614 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 *
1616 * Now socket state including sk->sk_err is changed only under lock,
1617 * hence we may omit checks after joining wait queue.
 1618 * We check the receive queue before schedule() only as an optimization;
1619 * it is very likely that release_sock() added new data.
1620 */
1621int sk_wait_data(struct sock *sk, long *timeo)
1622{
1623 int rc;
1624 DEFINE_WAIT(wait);
1625
Eric Dumazetaa395142010-04-20 13:03:51 +00001626 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1628 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1629 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001630 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 return rc;
1632}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633EXPORT_SYMBOL(sk_wait_data);
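/*
 * A minimal sketch, assuming a datagram-style receive path that holds the
 * socket lock and blocks in sk_wait_data() until something is queued.
 * example_wait_for_packet() is hypothetical.
 */
static struct sk_buff *example_wait_for_packet(struct sock *sk, int noblock,
					       int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo) {
			*err = -EAGAIN;
			return NULL;
		}
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
		sk_wait_data(sk, &timeo);	/* sleeps, then rechecks */
	}
	return skb_dequeue(&sk->sk_receive_queue);
}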
1634
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001635/**
1636 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1637 * @sk: socket
1638 * @size: memory size to allocate
1639 * @kind: allocation type
1640 *
1641 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1642 * rmem allocation. This function assumes that protocols which have
1643 * memory_pressure use sk_wmem_queued as write buffer accounting.
1644 */
1645int __sk_mem_schedule(struct sock *sk, int size, int kind)
1646{
1647 struct proto *prot = sk->sk_prot;
1648 int amt = sk_mem_pages(size);
1649 int allocated;
1650
1651 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1652 allocated = atomic_add_return(amt, prot->memory_allocated);
1653
1654 /* Under limit. */
1655 if (allocated <= prot->sysctl_mem[0]) {
1656 if (prot->memory_pressure && *prot->memory_pressure)
1657 *prot->memory_pressure = 0;
1658 return 1;
1659 }
1660
1661 /* Under pressure. */
1662 if (allocated > prot->sysctl_mem[1])
1663 if (prot->enter_memory_pressure)
Pavel Emelyanov5c52ba12008-07-16 20:28:10 -07001664 prot->enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001665
1666 /* Over hard limit. */
1667 if (allocated > prot->sysctl_mem[2])
1668 goto suppress_allocation;
1669
1670 /* guarantee minimum buffer size under pressure */
1671 if (kind == SK_MEM_RECV) {
1672 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1673 return 1;
1674 } else { /* SK_MEM_SEND */
1675 if (sk->sk_type == SOCK_STREAM) {
1676 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1677 return 1;
1678 } else if (atomic_read(&sk->sk_wmem_alloc) <
1679 prot->sysctl_wmem[0])
1680 return 1;
1681 }
1682
1683 if (prot->memory_pressure) {
Eric Dumazet17483762008-11-25 21:16:35 -08001684 int alloc;
1685
1686 if (!*prot->memory_pressure)
1687 return 1;
1688 alloc = percpu_counter_read_positive(prot->sockets_allocated);
1689 if (prot->sysctl_mem[2] > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001690 sk_mem_pages(sk->sk_wmem_queued +
1691 atomic_read(&sk->sk_rmem_alloc) +
1692 sk->sk_forward_alloc))
1693 return 1;
1694 }
1695
1696suppress_allocation:
1697
1698 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1699 sk_stream_moderate_sndbuf(sk);
1700
1701 /* Fail only if socket is _under_ its sndbuf.
 1702	 * In this case we cannot block, so we have to fail.
1703 */
1704 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1705 return 1;
1706 }
1707
1708 /* Alas. Undo changes. */
1709 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1710 atomic_sub(amt, prot->memory_allocated);
1711 return 0;
1712}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001713EXPORT_SYMBOL(__sk_mem_schedule);
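/*
 * Protocols normally reach this through the sk_wmem_schedule() and
 * sk_rmem_schedule() wrappers in net/sock.h. A hedged sketch of charging an
 * incoming buffer before queueing it; example_queue_rcv_skb() is
 * hypothetical, and real code typically also checks sk->sk_rcvbuf and runs
 * socket filters first.
 */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb->truesize))
		return -ENOBUFS;	/* over the protocol's memory limits */

	/* charges sk_rmem_alloc and forward alloc, destructor = sock_rfree */
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}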
1714
1715/**
 1716 * __sk_mem_reclaim - reclaim memory_allocated
1717 * @sk: socket
1718 */
1719void __sk_mem_reclaim(struct sock *sk)
1720{
1721 struct proto *prot = sk->sk_prot;
1722
Eric Dumazet680a5a52007-12-31 15:00:50 -08001723 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001724 prot->memory_allocated);
1725 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1726
1727 if (prot->memory_pressure && *prot->memory_pressure &&
1728 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1729 *prot->memory_pressure = 0;
1730}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001731EXPORT_SYMBOL(__sk_mem_reclaim);
1732
1733
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734/*
1735 * Set of default routines for initialising struct proto_ops when
1736 * the protocol does not support a particular function. In certain
1737 * cases where it makes no sense for a protocol to have a "do nothing"
1738 * function, some default processing is provided.
1739 */
1740
1741int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1742{
1743 return -EOPNOTSUPP;
1744}
Eric Dumazet2a915252009-05-27 11:30:05 +00001745EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001747int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 int len, int flags)
1749{
1750 return -EOPNOTSUPP;
1751}
Eric Dumazet2a915252009-05-27 11:30:05 +00001752EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
1754int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1755{
1756 return -EOPNOTSUPP;
1757}
Eric Dumazet2a915252009-05-27 11:30:05 +00001758EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
1760int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1761{
1762 return -EOPNOTSUPP;
1763}
Eric Dumazet2a915252009-05-27 11:30:05 +00001764EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001766int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 int *len, int peer)
1768{
1769 return -EOPNOTSUPP;
1770}
Eric Dumazet2a915252009-05-27 11:30:05 +00001771EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Eric Dumazet2a915252009-05-27 11:30:05 +00001773unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774{
1775 return 0;
1776}
Eric Dumazet2a915252009-05-27 11:30:05 +00001777EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
1779int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1780{
1781 return -EOPNOTSUPP;
1782}
Eric Dumazet2a915252009-05-27 11:30:05 +00001783EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
1785int sock_no_listen(struct socket *sock, int backlog)
1786{
1787 return -EOPNOTSUPP;
1788}
Eric Dumazet2a915252009-05-27 11:30:05 +00001789EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
1791int sock_no_shutdown(struct socket *sock, int how)
1792{
1793 return -EOPNOTSUPP;
1794}
Eric Dumazet2a915252009-05-27 11:30:05 +00001795EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
1797int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001798 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799{
1800 return -EOPNOTSUPP;
1801}
Eric Dumazet2a915252009-05-27 11:30:05 +00001802EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
1804int sock_no_getsockopt(struct socket *sock, int level, int optname,
1805 char __user *optval, int __user *optlen)
1806{
1807 return -EOPNOTSUPP;
1808}
Eric Dumazet2a915252009-05-27 11:30:05 +00001809EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
1811int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1812 size_t len)
1813{
1814 return -EOPNOTSUPP;
1815}
Eric Dumazet2a915252009-05-27 11:30:05 +00001816EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817
1818int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1819 size_t len, int flags)
1820{
1821 return -EOPNOTSUPP;
1822}
Eric Dumazet2a915252009-05-27 11:30:05 +00001823EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
1825int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1826{
1827 /* Mirror missing mmap method error code */
1828 return -ENODEV;
1829}
Eric Dumazet2a915252009-05-27 11:30:05 +00001830EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
1832ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1833{
1834 ssize_t res;
1835 struct msghdr msg = {.msg_flags = flags};
1836 struct kvec iov;
1837 char *kaddr = kmap(page);
1838 iov.iov_base = kaddr + offset;
1839 iov.iov_len = size;
1840 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1841 kunmap(page);
1842 return res;
1843}
Eric Dumazet2a915252009-05-27 11:30:05 +00001844EXPORT_SYMBOL(sock_no_sendpage);
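/*
 * A hedged sketch of how a protocol family typically plugs these stubs into
 * its struct proto_ops for the operations it does not support; the
 * example_dgram_ops name, the PF_UNSPEC family and the omitted handlers are
 * placeholders only.
 */
static const struct proto_ops example_dgram_ops = {
	.family		= PF_UNSPEC,		/* placeholder family */
	.owner		= THIS_MODULE,
	/* unsupported operations fall back to the generic stubs: */
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
	/* .release, .bind, .sendmsg, .recvmsg, ... would point at real code */
};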
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
1846/*
1847 * Default Socket Callbacks
1848 */
1849
1850static void sock_def_wakeup(struct sock *sk)
1851{
Eric Dumazet43815482010-04-29 11:01:49 +00001852 struct socket_wq *wq;
1853
1854 rcu_read_lock();
1855 wq = rcu_dereference(sk->sk_wq);
1856 if (wq_has_sleeper(wq))
1857 wake_up_interruptible_all(&wq->wait);
1858 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859}
1860
1861static void sock_def_error_report(struct sock *sk)
1862{
Eric Dumazet43815482010-04-29 11:01:49 +00001863 struct socket_wq *wq;
1864
1865 rcu_read_lock();
1866 wq = rcu_dereference(sk->sk_wq);
1867 if (wq_has_sleeper(wq))
1868 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001869 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00001870 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871}
1872
1873static void sock_def_readable(struct sock *sk, int len)
1874{
Eric Dumazet43815482010-04-29 11:01:49 +00001875 struct socket_wq *wq;
1876
1877 rcu_read_lock();
1878 wq = rcu_dereference(sk->sk_wq);
1879 if (wq_has_sleeper(wq))
1880 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
Davide Libenzi37e55402009-03-31 15:24:21 -07001881 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001882 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00001883 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884}
1885
1886static void sock_def_write_space(struct sock *sk)
1887{
Eric Dumazet43815482010-04-29 11:01:49 +00001888 struct socket_wq *wq;
1889
1890 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891
1892 /* Do not wake up a writer until he can make "significant"
1893 * progress. --DaveM
1894 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001895 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00001896 wq = rcu_dereference(sk->sk_wq);
1897 if (wq_has_sleeper(wq))
1898 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07001899 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900
1901 /* Should agree with poll, otherwise some programs break */
1902 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001903 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 }
1905
Eric Dumazet43815482010-04-29 11:01:49 +00001906 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907}
1908
1909static void sock_def_destruct(struct sock *sk)
1910{
Jesper Juhla51482b2005-11-08 09:41:34 -08001911 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912}
1913
1914void sk_send_sigurg(struct sock *sk)
1915{
1916 if (sk->sk_socket && sk->sk_socket->file)
1917 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001918 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919}
Eric Dumazet2a915252009-05-27 11:30:05 +00001920EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
1922void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1923 unsigned long expires)
1924{
1925 if (!mod_timer(timer, expires))
1926 sock_hold(sk);
1927}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928EXPORT_SYMBOL(sk_reset_timer);
1929
1930void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1931{
1932 if (timer_pending(timer) && del_timer(timer))
1933 __sock_put(sk);
1934}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935EXPORT_SYMBOL(sk_stop_timer);
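/*
 * A hedged sketch of the usual pairing: sk_reset_timer() takes a reference
 * on the socket while the timer is pending, and the handler drops it when
 * it fires. example_timer_handler()/example_arm_timer() are hypothetical,
 * loosely modelled on existing protocol timers.
 */
static void example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... protocol work, possibly re-arming via sk_reset_timer() ... */
	bh_unlock_sock(sk);
	sock_put(sk);		/* drop the reference taken by sk_reset_timer() */
}

static void example_arm_timer(struct sock *sk)
{
	/* normally done once, at socket initialisation */
	setup_timer(&sk->sk_timer, example_timer_handler, (unsigned long)sk);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}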
1936
1937void sock_init_data(struct socket *sock, struct sock *sk)
1938{
1939 skb_queue_head_init(&sk->sk_receive_queue);
1940 skb_queue_head_init(&sk->sk_write_queue);
1941 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001942#ifdef CONFIG_NET_DMA
1943 skb_queue_head_init(&sk->sk_async_wait_queue);
1944#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
1946 sk->sk_send_head = NULL;
1947
1948 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001949
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 sk->sk_allocation = GFP_KERNEL;
1951 sk->sk_rcvbuf = sysctl_rmem_default;
1952 sk->sk_sndbuf = sysctl_wmem_default;
1953 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07001954 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
1956 sock_set_flag(sk, SOCK_ZAPPED);
1957
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001958 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00001960 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 sock->sk = sk;
1962 } else
Eric Dumazet43815482010-04-29 11:01:49 +00001963 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
Eric Dumazetb6c67122010-04-08 23:03:29 +00001965 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001967 lockdep_set_class_and_name(&sk->sk_callback_lock,
1968 af_callback_keys + sk->sk_family,
1969 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
1971 sk->sk_state_change = sock_def_wakeup;
1972 sk->sk_data_ready = sock_def_readable;
1973 sk->sk_write_space = sock_def_write_space;
1974 sk->sk_error_report = sock_def_error_report;
1975 sk->sk_destruct = sock_def_destruct;
1976
1977 sk->sk_sndmsg_page = NULL;
1978 sk->sk_sndmsg_off = 0;
1979
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001980 sk->sk_peer_pid = NULL;
1981 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 sk->sk_write_pending = 0;
1983 sk->sk_rcvlowat = 1;
1984 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1985 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1986
Eric Dumazetf37f0af2008-04-13 21:39:26 -07001987 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001989 /*
1990 * Before updating sk_refcnt, we must commit prior changes to memory
1991 * (Documentation/RCU/rculist_nulls.txt for details)
1992 */
1993 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08001995 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996}
Eric Dumazet2a915252009-05-27 11:30:05 +00001997EXPORT_SYMBOL(sock_init_data);
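/*
 * A hedged sketch of the usual call site: an address family's socket
 * creation path allocates the sock and lets sock_init_data() fill in the
 * defaults above. example_create(), example_af_proto and the PF_UNSPEC
 * family are placeholders.
 */
static struct proto example_af_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* no private state in this sketch */
};

static int example_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_UNSPEC, GFP_KERNEL, &example_af_proto);
	if (!sk)
		return -ENOBUFS;

	sock_init_data(sock, sk);	/* queues, timers, default callbacks */
	sk->sk_protocol = protocol;
	/* protocol-private initialisation would go here */
	return 0;
}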
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
Harvey Harrisonb5606c22008-02-13 15:03:16 -08001999void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000{
2001 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002002 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002003 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002005 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002006 spin_unlock(&sk->sk_lock.slock);
2007 /*
2008 * The sk_lock has mutex_lock() semantics here:
2009 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002010 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002011 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002013EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002015void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002017 /*
2018 * The sk_lock has mutex_unlock() semantics:
2019 */
2020 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2021
2022 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 if (sk->sk_backlog.tail)
2024 __release_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002025 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002026 if (waitqueue_active(&sk->sk_lock.wq))
2027 wake_up(&sk->sk_lock.wq);
2028 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029}
2030EXPORT_SYMBOL(release_sock);
2031
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002032/**
2033 * lock_sock_fast - fast version of lock_sock
2034 * @sk: socket
2035 *
 2036 * This version should be used for very small sections, where the process won't block.
 2037 * returns false if the fast path is taken:
 2038 *   sk_lock.slock locked, owned = 0, BH disabled
 2039 * returns true if the slow path is taken:
 2040 *   sk_lock.slock unlocked, owned = 1, BH enabled
2041 */
2042bool lock_sock_fast(struct sock *sk)
2043{
2044 might_sleep();
2045 spin_lock_bh(&sk->sk_lock.slock);
2046
2047 if (!sk->sk_lock.owned)
2048 /*
 2049		 * Note : fast path, we return with the spinlock held and BH disabled
2050 */
2051 return false;
2052
2053 __lock_sock(sk);
2054 sk->sk_lock.owned = 1;
2055 spin_unlock(&sk->sk_lock.slock);
2056 /*
2057 * The sk_lock has mutex_lock() semantics here:
2058 */
2059 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2060 local_bh_enable();
2061 return true;
2062}
2063EXPORT_SYMBOL(lock_sock_fast);
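/*
 * A short sketch of the intended pairing with unlock_sock_fast() from
 * include/net/sock.h: keep the critical section tiny and non-sleeping so the
 * fast (spinlock only) path stays valid. example_small_update() is
 * hypothetical.
 */
static void example_small_update(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	atomic_inc(&sk->sk_drops);	/* some tiny, non-sleeping update */

	unlock_sock_fast(sk, slow);	/* spin_unlock_bh() or release_sock() */
}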
2064
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002066{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002067 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002069 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002070 tv = ktime_to_timeval(sk->sk_stamp);
2071 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002073 if (tv.tv_sec == 0) {
2074 sk->sk_stamp = ktime_get_real();
2075 tv = ktime_to_timeval(sk->sk_stamp);
2076 }
2077 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002078}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079EXPORT_SYMBOL(sock_get_timestamp);
2080
Eric Dumazetae40eb12007-03-18 17:33:16 -07002081int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2082{
2083 struct timespec ts;
2084 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002085 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002086 ts = ktime_to_timespec(sk->sk_stamp);
2087 if (ts.tv_sec == -1)
2088 return -ENOENT;
2089 if (ts.tv_sec == 0) {
2090 sk->sk_stamp = ktime_get_real();
2091 ts = ktime_to_timespec(sk->sk_stamp);
2092 }
2093 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2094}
2095EXPORT_SYMBOL(sock_get_timestampns);
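/*
 * A hedged sketch of the typical caller: a protocol ioctl() handler wiring
 * SIOCGSTAMP / SIOCGSTAMPNS to the two helpers above (example_ioctl() is
 * hypothetical, other commands elided).
 */
static int example_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}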
2096
Patrick Ohly20d49472009-02-12 05:03:38 +00002097void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002098{
Patrick Ohly20d49472009-02-12 05:03:38 +00002099 if (!sock_flag(sk, flag)) {
2100 sock_set_flag(sk, flag);
2101 /*
2102 * we just set one of the two flags which require net
2103 * time stamping, but time stamping might have been on
2104 * already because of the other one
2105 */
2106 if (!sock_flag(sk,
2107 flag == SOCK_TIMESTAMP ?
2108 SOCK_TIMESTAMPING_RX_SOFTWARE :
2109 SOCK_TIMESTAMP))
2110 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 }
2112}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114/*
 2115 *	Get a socket option on a socket.
2116 *
2117 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2118 * asynchronous errors should be reported by getsockopt. We assume
 2119 *	this means if you specify SO_ERROR (otherwise what's the point of it).
2120 */
2121int sock_common_getsockopt(struct socket *sock, int level, int optname,
2122 char __user *optval, int __user *optlen)
2123{
2124 struct sock *sk = sock->sk;
2125
2126 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2127}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128EXPORT_SYMBOL(sock_common_getsockopt);
2129
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002130#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002131int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2132 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002133{
2134 struct sock *sk = sock->sk;
2135
Johannes Berg1e51f952007-03-06 13:44:06 -08002136 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002137 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2138 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002139 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2140}
2141EXPORT_SYMBOL(compat_sock_common_getsockopt);
2142#endif
2143
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2145 struct msghdr *msg, size_t size, int flags)
2146{
2147 struct sock *sk = sock->sk;
2148 int addr_len = 0;
2149 int err;
2150
2151 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2152 flags & ~MSG_DONTWAIT, &addr_len);
2153 if (err >= 0)
2154 msg->msg_namelen = addr_len;
2155 return err;
2156}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157EXPORT_SYMBOL(sock_common_recvmsg);
2158
2159/*
2160 * Set socket options on an inet socket.
2161 */
2162int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002163 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164{
2165 struct sock *sk = sock->sk;
2166
2167 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2168}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169EXPORT_SYMBOL(sock_common_setsockopt);
2170
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002171#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002172int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002173 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002174{
2175 struct sock *sk = sock->sk;
2176
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002177 if (sk->sk_prot->compat_setsockopt != NULL)
2178 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2179 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002180 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2181}
2182EXPORT_SYMBOL(compat_sock_common_setsockopt);
2183#endif
2184
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185void sk_common_release(struct sock *sk)
2186{
2187 if (sk->sk_prot->destroy)
2188 sk->sk_prot->destroy(sk);
2189
2190 /*
 2191	 * Observation: when sk_common_release is called, processes have
 2192	 * no access to the socket, but the network stack still does.
2193 * Step one, detach it from networking:
2194 *
2195 * A. Remove from hash tables.
2196 */
2197
2198 sk->sk_prot->unhash(sk);
2199
2200 /*
 2201	 * At this point the socket cannot receive new packets, but it is possible
 2202	 * that some packets are in flight because some CPU runs the receiver and
 2203	 * did the hash table lookup before we unhashed the socket. They will reach
 2204	 * the receive queue and will be purged by the socket destructor.
 2205	 *
 2206	 * Also we still have packets pending on the receive queue and probably
 2207	 * our own packets waiting in device queues. sock_destroy will drain the
 2208	 * receive queue, but transmitted packets will delay socket destruction
 2209	 * until the last reference is released.
2210 */
2211
2212 sock_orphan(sk);
2213
2214 xfrm_sk_free_policy(sk);
2215
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002216 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 sock_put(sk);
2218}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219EXPORT_SYMBOL(sk_common_release);
2220
2221static DEFINE_RWLOCK(proto_list_lock);
2222static LIST_HEAD(proto_list);
2223
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002224#ifdef CONFIG_PROC_FS
2225#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002226struct prot_inuse {
2227 int val[PROTO_INUSE_NR];
2228};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002229
2230static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002231
2232#ifdef CONFIG_NET_NS
2233void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2234{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002235 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002236}
2237EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2238
2239int sock_prot_inuse_get(struct net *net, struct proto *prot)
2240{
2241 int cpu, idx = prot->inuse_idx;
2242 int res = 0;
2243
2244 for_each_possible_cpu(cpu)
2245 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2246
2247 return res >= 0 ? res : 0;
2248}
2249EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2250
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002251static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002252{
2253 net->core.inuse = alloc_percpu(struct prot_inuse);
2254 return net->core.inuse ? 0 : -ENOMEM;
2255}
2256
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002257static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002258{
2259 free_percpu(net->core.inuse);
2260}
2261
2262static struct pernet_operations net_inuse_ops = {
2263 .init = sock_inuse_init_net,
2264 .exit = sock_inuse_exit_net,
2265};
2266
2267static __init int net_inuse_init(void)
2268{
2269 if (register_pernet_subsys(&net_inuse_ops))
2270 panic("Cannot initialize net inuse counters");
2271
2272 return 0;
2273}
2274
2275core_initcall(net_inuse_init);
2276#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002277static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2278
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002279void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002280{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002281 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002282}
2283EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2284
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002285int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002286{
2287 int cpu, idx = prot->inuse_idx;
2288 int res = 0;
2289
2290 for_each_possible_cpu(cpu)
2291 res += per_cpu(prot_inuse, cpu).val[idx];
2292
2293 return res >= 0 ? res : 0;
2294}
2295EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002296#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002297
2298static void assign_proto_idx(struct proto *prot)
2299{
2300 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2301
2302 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2303 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2304 return;
2305 }
2306
2307 set_bit(prot->inuse_idx, proto_inuse_idx);
2308}
2309
2310static void release_proto_idx(struct proto *prot)
2311{
2312 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2313 clear_bit(prot->inuse_idx, proto_inuse_idx);
2314}
2315#else
2316static inline void assign_proto_idx(struct proto *prot)
2317{
2318}
2319
2320static inline void release_proto_idx(struct proto *prot)
2321{
2322}
2323#endif
2324
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325int proto_register(struct proto *prot, int alloc_slab)
2326{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 if (alloc_slab) {
2328 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002329 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2330 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
2332 if (prot->slab == NULL) {
2333 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2334 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002335 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002337
2338 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002339 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002340 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002341 goto out_free_sock_slab;
2342
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002343 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002344 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002345 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002346
2347 if (prot->rsk_prot->slab == NULL) {
2348 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2349 prot->name);
2350 goto out_free_request_sock_slab_name;
2351 }
2352 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002353
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002354 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002355 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002356
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002357 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002358 goto out_free_request_sock_slab;
2359
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002360 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002361 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002362 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002363 0,
2364 SLAB_HWCACHE_ALIGN |
2365 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002366 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002367 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002368 goto out_free_timewait_sock_slab_name;
2369 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 }
2371
Arnaldo Carvalho de Melo2a278052005-04-16 15:24:09 -07002372 write_lock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002374 assign_proto_idx(prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 write_unlock(&proto_list_lock);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002376 return 0;
2377
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002378out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002379 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002380out_free_request_sock_slab:
2381 if (prot->rsk_prot && prot->rsk_prot->slab) {
2382 kmem_cache_destroy(prot->rsk_prot->slab);
2383 prot->rsk_prot->slab = NULL;
2384 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002385out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002386 if (prot->rsk_prot)
2387 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002388out_free_sock_slab:
2389 kmem_cache_destroy(prot->slab);
2390 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002391out:
2392 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394EXPORT_SYMBOL(proto_register);
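/*
 * A hedged sketch of registering a protocol: a minimal struct proto with a
 * per-protocol sock structure and a dedicated slab, typically called from a
 * module init function. example_prot / struct example_sock are hypothetical.
 */
struct example_sock {
	struct sock	sk;		/* struct sock must come first */
	int		example_state;
};

static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_init(void)
{
	return proto_register(&example_prot, 1);	/* 1 => create a slab */
}

static void __exit example_exit(void)
{
	proto_unregister(&example_prot);
}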
2395
2396void proto_unregister(struct proto *prot)
2397{
2398 write_lock(&proto_list_lock);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002399 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002400 list_del(&prot->node);
2401 write_unlock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402
2403 if (prot->slab != NULL) {
2404 kmem_cache_destroy(prot->slab);
2405 prot->slab = NULL;
2406 }
2407
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002408 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002409 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002410 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002411 prot->rsk_prot->slab = NULL;
2412 }
2413
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002414 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002415 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002416 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002417 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002418 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420EXPORT_SYMBOL(proto_unregister);
2421
2422#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002424 __acquires(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425{
2426 read_lock(&proto_list_lock);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002427 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428}
2429
2430static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2431{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002432 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433}
2434
2435static void proto_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002436 __releases(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437{
2438 read_unlock(&proto_list_lock);
2439}
2440
2441static char proto_method_implemented(const void *method)
2442{
2443 return method == NULL ? 'n' : 'y';
2444}
2445
2446static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2447{
2448 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
2449 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2450 proto->name,
2451 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002452 sock_prot_inuse_get(seq_file_net(seq), proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
2454 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2455 proto->max_header,
2456 proto->slab == NULL ? "no" : "yes",
2457 module_name(proto->owner),
2458 proto_method_implemented(proto->close),
2459 proto_method_implemented(proto->connect),
2460 proto_method_implemented(proto->disconnect),
2461 proto_method_implemented(proto->accept),
2462 proto_method_implemented(proto->ioctl),
2463 proto_method_implemented(proto->init),
2464 proto_method_implemented(proto->destroy),
2465 proto_method_implemented(proto->shutdown),
2466 proto_method_implemented(proto->setsockopt),
2467 proto_method_implemented(proto->getsockopt),
2468 proto_method_implemented(proto->sendmsg),
2469 proto_method_implemented(proto->recvmsg),
2470 proto_method_implemented(proto->sendpage),
2471 proto_method_implemented(proto->bind),
2472 proto_method_implemented(proto->backlog_rcv),
2473 proto_method_implemented(proto->hash),
2474 proto_method_implemented(proto->unhash),
2475 proto_method_implemented(proto->get_port),
2476 proto_method_implemented(proto->enter_memory_pressure));
2477}
2478
2479static int proto_seq_show(struct seq_file *seq, void *v)
2480{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002481 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2483 "protocol",
2484 "size",
2485 "sockets",
2486 "memory",
2487 "press",
2488 "maxhdr",
2489 "slab",
2490 "module",
2491 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2492 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002493 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 return 0;
2495}
2496
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002497static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 .start = proto_seq_start,
2499 .next = proto_seq_next,
2500 .stop = proto_seq_stop,
2501 .show = proto_seq_show,
2502};
2503
2504static int proto_seq_open(struct inode *inode, struct file *file)
2505{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002506 return seq_open_net(inode, file, &proto_seq_ops,
2507 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508}
2509
Arjan van de Ven9a321442007-02-12 00:55:35 -08002510static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 .owner = THIS_MODULE,
2512 .open = proto_seq_open,
2513 .read = seq_read,
2514 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002515 .release = seq_release_net,
2516};
2517
2518static __net_init int proto_init_net(struct net *net)
2519{
2520 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2521 return -ENOMEM;
2522
2523 return 0;
2524}
2525
2526static __net_exit void proto_exit_net(struct net *net)
2527{
2528 proc_net_remove(net, "protocols");
2529}
2530
2531
2532static __net_initdata struct pernet_operations proto_net_ops = {
2533 .init = proto_init_net,
2534 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535};
2536
2537static int __init proto_init(void)
2538{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002539 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540}
2541
2542subsys_initcall(proto_init);
2543
2544#endif /* PROC_FS */