/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif
141
Glauber Costa36b77a52011-12-16 00:51:59 +0000142static DEFINE_MUTEX(proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000143static LIST_HEAD(proto_list);
144
Andrew Mortonc255a452012-07-31 16:43:02 -0700145#ifdef CONFIG_MEMCG_KMEM
Glauber Costa1d62e432012-04-09 19:36:33 -0300146int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000147{
148 struct proto *proto;
149 int ret = 0;
150
Glauber Costa36b77a52011-12-16 00:51:59 +0000151 mutex_lock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000152 list_for_each_entry(proto, &proto_list, node) {
153 if (proto->init_cgroup) {
Glauber Costa1d62e432012-04-09 19:36:33 -0300154 ret = proto->init_cgroup(memcg, ss);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000155 if (ret)
156 goto out;
157 }
158 }
159
Glauber Costa36b77a52011-12-16 00:51:59 +0000160 mutex_unlock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000161 return ret;
162out:
163 list_for_each_entry_continue_reverse(proto, &proto_list, node)
164 if (proto->destroy_cgroup)
Glauber Costa1d62e432012-04-09 19:36:33 -0300165 proto->destroy_cgroup(memcg);
Glauber Costa36b77a52011-12-16 00:51:59 +0000166 mutex_unlock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000167 return ret;
168}
169
Glauber Costa1d62e432012-04-09 19:36:33 -0300170void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000171{
172 struct proto *proto;
173
Glauber Costa36b77a52011-12-16 00:51:59 +0000174 mutex_lock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000175 list_for_each_entry_reverse(proto, &proto_list, node)
176 if (proto->destroy_cgroup)
Glauber Costa1d62e432012-04-09 19:36:33 -0300177 proto->destroy_cgroup(memcg);
Glauber Costa36b77a52011-12-16 00:51:59 +0000178 mutex_unlock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000179}
180#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

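/* Parse a struct timeval passed via setsockopt() (SO_RCVTIMEO/SO_SNDTIMEO)
 * and convert it into a jiffies-based timeout in *timeo_p.
 */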
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

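/* Clear the given timestamping flags on the socket, and drop the global
 * timestamping reference once no timestamp flags remain set.
 */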
static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


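/* Charge an incoming skb to the socket's receive queue: enforce the
 * receive buffer limit, run the attached socket filter, account the
 * memory, then append the skb to sk_receive_queue and notify readers
 * through sk_data_ready().
 */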
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the RCU protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

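/* Deliver an skb to the socket while holding the socket spinlock.  If the
 * socket is currently owned by a process, the skb is placed on the backlog
 * instead (and dropped if the backlog is full).  Consumes a reference on sk.
 */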
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

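/* Validate the socket's cached destination entry: if the route has become
 * obsolete, clear the cache, release the dst and return NULL; otherwise
 * return the still-valid dst.
 */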
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

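/* Handle SO_BINDTODEVICE: resolve the interface name copied from user space
 * and bind the socket to that device, or unbind it when the name is empty.
 * Requires CAP_NET_RAW.
 */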
static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid(current_ns, cred->euid);
		ucred->gid = from_kgid(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
 * un-modified. Special care is taken when initializing object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

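/* Zero a socket object while preserving both nulls-list 'next' pointers
 * (skc_node and skc_portaddr_node), as required by protocols whose slab
 * caches use SLAB_DESTROY_BY_RCU.
 */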
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

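/* Allocate a struct sock from the protocol's slab cache (or with kmalloc
 * when no cache is registered), then attach the LSM security blob and take
 * a module reference on the owning protocol; both are undone on failure.
 */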
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(task);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
#endif

Linus Torvalds1da177e2005-04-16 15:20:36 -07001252/**
1253 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001254 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001255 * @family: protocol family
1256 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1257 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001259struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001260 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001262 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001264 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001266 sk->sk_family = family;
1267 /*
1268 * See comment in struct sock definition to understand
1269 * why we need sk_prot_creator -acme
1270 */
1271 sk->sk_prot = sk->sk_prot_creator = prot;
1272 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001273 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001274 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001275
1276 sock_update_classid(sk);
John Fastabend406a3c62012-07-20 10:39:25 +00001277 sock_update_netprioidx(sk, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 }
Frank Filza79af592005-09-27 15:23:38 -07001279
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001280 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281}
Eric Dumazet2a915252009-05-27 11:30:05 +00001282EXPORT_SYMBOL(sk_alloc);
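/*
 * Usage sketch (hypothetical "example_*" names, not part of this file):
 * a protocol's create hook typically pairs sk_alloc() with
 * sock_init_data() as below; the sock is later released through
 * sk_free()/sock_put() once the last reference is gone. PF_INET and
 * GFP_KERNEL are only illustrative choices.
 */
static struct sock *example_sk_create(struct net *net, struct socket *sock,
				      struct proto *prot)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_INET, GFP_KERNEL, prot);
	if (!sk)
		return NULL;

	/* attach generic state: queues, default callbacks, buffer limits */
	sock_init_data(sock, sk);
	return sk;
}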
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283
Eric Dumazet2b85a342009-06-11 02:55:43 -07001284static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285{
1286 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287
1288 if (sk->sk_destruct)
1289 sk->sk_destruct(sk);
1290
Paul E. McKenneya898def2010-02-22 17:04:49 -08001291 filter = rcu_dereference_check(sk->sk_filter,
1292 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001294 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001295 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 }
1297
Eric Dumazet08e29af2011-11-28 12:04:18 +00001298 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299
1300 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001301 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1302 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001304 if (sk->sk_peer_cred)
1305 put_cred(sk->sk_peer_cred);
1306 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001307 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001308 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001310
1311void sk_free(struct sock *sk)
1312{
1313 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001314 * We subtract one from sk_wmem_alloc so we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001315 * some packets are still in a tx queue.
 1316 * If it is not zero, sock_wfree() will call __sk_free(sk) later.
1317 */
1318 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1319 __sk_free(sk);
1320}
Eric Dumazet2a915252009-05-27 11:30:05 +00001321EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322
Denis V. Lunevedf02082008-02-29 11:18:32 -08001323/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001324 * The last sock_put should drop the reference to sk->sk_net. It has already
 1325 * been dropped in sk_change_net. Taking a reference on the stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001326 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001327 * Instead, take a reference on the socket to remove it from the hash while it is
Denis V. Lunevedf02082008-02-29 11:18:32 -08001328 * still _alive_, and then destroy it in the context of init_net.
1329 */
1330void sk_release_kernel(struct sock *sk)
1331{
1332 if (sk == NULL || sk->sk_socket == NULL)
1333 return;
1334
1335 sock_hold(sk);
1336 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001337 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001338 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001339 sock_put(sk);
1340}
David S. Miller45af1752008-02-29 11:33:19 -08001341EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001342
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001343static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1344{
1345 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1346 sock_update_memcg(newsk);
1347}
1348
Eric Dumazete56c57d2011-11-08 17:07:07 -05001349/**
1350 * sk_clone_lock - clone a socket, and lock its clone
1351 * @sk: the socket to clone
1352 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1353 *
1354 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1355 */
1356struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001357{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001358 struct sock *newsk;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001359
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001360 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001361 if (newsk != NULL) {
1362 struct sk_filter *filter;
1363
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001364 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001365
1366 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001367 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001368 sk_node_init(&newsk->sk_node);
1369 sock_lock_init(newsk);
1370 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001371 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001372 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001373
1374 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001375 /*
1376 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1377 */
1378 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001379 atomic_set(&newsk->sk_omem_alloc, 0);
1380 skb_queue_head_init(&newsk->sk_receive_queue);
1381 skb_queue_head_init(&newsk->sk_write_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001382#ifdef CONFIG_NET_DMA
1383 skb_queue_head_init(&newsk->sk_async_wait_queue);
1384#endif
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001385
Eric Dumazetb6c67122010-04-08 23:03:29 +00001386 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001387 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001388 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1389 af_callback_keys + newsk->sk_family,
1390 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001391
1392 newsk->sk_dst_cache = NULL;
1393 newsk->sk_wmem_queued = 0;
1394 newsk->sk_forward_alloc = 0;
1395 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001396 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1397
1398 sock_reset_flag(newsk, SOCK_DONE);
1399 skb_queue_head_init(&newsk->sk_error_queue);
1400
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001401 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001402 if (filter != NULL)
1403 sk_filter_charge(newsk, filter);
1404
1405 if (unlikely(xfrm_sk_clone_policy(newsk))) {
 1406 /* It is still a raw copy of the parent, so invalidate
 1407 * its destructor and do a plain sk_free() */
1408 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001409 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001410 sk_free(newsk);
1411 newsk = NULL;
1412 goto out;
1413 }
1414
1415 newsk->sk_err = 0;
1416 newsk->sk_priority = 0;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001417 /*
1418 * Before updating sk_refcnt, we must commit prior changes to memory
1419 * (Documentation/RCU/rculist_nulls.txt for details)
1420 */
1421 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001422 atomic_set(&newsk->sk_refcnt, 2);
1423
1424 /*
1425 * Increment the counter in the same struct proto as the master
1426 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1427 * is the same as sk->sk_prot->socks, as this field was copied
1428 * with memcpy).
1429 *
1430 * This _changes_ the previous behaviour, where
 1431 * tcp_create_openreq_child was always incrementing the
 1432 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
 1433 * to be taken into account in all callers. -acme
1434 */
1435 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001436 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001437 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001438
Glauber Costaf3f511e2012-01-05 20:16:39 +00001439 sk_update_clone(sk, newsk);
1440
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001441 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001442 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001443
Eric Dumazet08e29af2011-11-28 12:04:18 +00001444 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001445 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001446 }
1447out:
1448 return newsk;
1449}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001450EXPORT_SYMBOL_GPL(sk_clone_lock);
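/*
 * Usage sketch (hypothetical helper): sk_clone_lock() returns the clone
 * with bh_lock_sock() held, so the caller must drop that lock itself,
 * even if its own fix-up work fails.
 */
static struct sock *example_accept_clone(const struct sock *sk)
{
	struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);

	if (!newsk)
		return NULL;
	/* ... protocol-specific fix-up of the clone goes here ... */
	bh_unlock_sock(newsk);	/* required even on the caller's error path */
	return newsk;
}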
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001451
Andi Kleen99580892007-04-20 17:12:43 -07001452void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1453{
1454 __sk_dst_set(sk, dst);
1455 sk->sk_route_caps = dst->dev->features;
1456 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001457 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001458 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001459 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001460 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001461 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001462 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001463 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001464 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001465 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001466 }
Andi Kleen99580892007-04-20 17:12:43 -07001467 }
1468}
1469EXPORT_SYMBOL_GPL(sk_setup_caps);
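/*
 * Usage sketch (hypothetical helper): sk_setup_caps() is normally called
 * right after a route lookup so the socket inherits the offload features
 * of the output device; "dst" is whatever dst_entry that lookup produced.
 */
static void example_connect_finish(struct sock *sk, struct dst_entry *dst)
{
	sk_setup_caps(sk, dst);	/* caches dst and derives sk_route_caps */
}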
1470
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471void __init sk_init(void)
1472{
Jan Beulich44813742009-09-21 17:03:05 -07001473 if (totalram_pages <= 4096) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 sysctl_wmem_max = 32767;
1475 sysctl_rmem_max = 32767;
1476 sysctl_wmem_default = 32767;
1477 sysctl_rmem_default = 32767;
Jan Beulich44813742009-09-21 17:03:05 -07001478 } else if (totalram_pages >= 131072) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 sysctl_wmem_max = 131071;
1480 sysctl_rmem_max = 131071;
1481 }
1482}
1483
1484/*
1485 * Simple resource managers for sockets.
1486 */
1487
1488
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001489/*
1490 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 */
1492void sock_wfree(struct sk_buff *skb)
1493{
1494 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001495 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496
Eric Dumazetd99927f2009-09-24 10:49:24 +00001497 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1498 /*
 1499 * Keep a reference on sk_wmem_alloc; it will be released
 1500 * after the sk_write_space() call.
1501 */
1502 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001504 len = 1;
1505 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001506 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001507 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1508 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001509 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001510 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001511 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512}
Eric Dumazet2a915252009-05-27 11:30:05 +00001513EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001515/*
1516 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 */
1518void sock_rfree(struct sk_buff *skb)
1519{
1520 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001521 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Eric Dumazetd361fd52010-07-10 22:45:17 +00001523 atomic_sub(len, &sk->sk_rmem_alloc);
1524 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525}
Eric Dumazet2a915252009-05-27 11:30:05 +00001526EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
David S. Miller41063e92012-06-19 21:22:05 -07001528void sock_edemux(struct sk_buff *skb)
1529{
1530 sock_put(skb->sk);
1531}
1532EXPORT_SYMBOL(sock_edemux);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
1534int sock_i_uid(struct sock *sk)
1535{
1536 int uid;
1537
Eric Dumazetf064af12010-09-22 12:43:39 +00001538 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001540 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 return uid;
1542}
Eric Dumazet2a915252009-05-27 11:30:05 +00001543EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544
1545unsigned long sock_i_ino(struct sock *sk)
1546{
1547 unsigned long ino;
1548
Eric Dumazetf064af12010-09-22 12:43:39 +00001549 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001551 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 return ino;
1553}
Eric Dumazet2a915252009-05-27 11:30:05 +00001554EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
1556/*
1557 * Allocate a skb from the socket's send buffer.
1558 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001559struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001560 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561{
1562 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001563 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 if (skb) {
1565 skb_set_owner_w(skb, sk);
1566 return skb;
1567 }
1568 }
1569 return NULL;
1570}
Eric Dumazet2a915252009-05-27 11:30:05 +00001571EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572
1573/*
1574 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001575 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001576struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001577 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578{
1579 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1580 struct sk_buff *skb = alloc_skb(size, priority);
1581 if (skb) {
1582 skb_set_owner_r(skb, sk);
1583 return skb;
1584 }
1585 }
1586 return NULL;
1587}
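/*
 * Usage sketch (hypothetical helper): an skb charged against the send
 * buffer. With force == 0 the allocation is refused once sk_wmem_alloc
 * reaches sk_sndbuf; the destructor installed by skb_set_owner_w()
 * (sock_wfree) undoes the charge when the packet is freed.
 */
static struct sk_buff *example_make_packet(struct sock *sk, unsigned long len)
{
	return sock_wmalloc(sk, len, 0, sk->sk_allocation);
}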
1588
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001589/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001591 */
Al Virodd0fc662005-10-07 07:46:04 +01001592void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593{
Eric Dumazet95c96172012-04-15 05:58:06 +00001594 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1596 void *mem;
1597 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001598 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 */
1600 atomic_add(size, &sk->sk_omem_alloc);
1601 mem = kmalloc(size, priority);
1602 if (mem)
1603 return mem;
1604 atomic_sub(size, &sk->sk_omem_alloc);
1605 }
1606 return NULL;
1607}
Eric Dumazet2a915252009-05-27 11:30:05 +00001608EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
1610/*
1611 * Free an option memory block.
1612 */
1613void sock_kfree_s(struct sock *sk, void *mem, int size)
1614{
1615 kfree(mem);
1616 atomic_sub(size, &sk->sk_omem_alloc);
1617}
Eric Dumazet2a915252009-05-27 11:30:05 +00001618EXPORT_SYMBOL(sock_kfree_s);
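/*
 * Usage sketch (hypothetical helper): option memory must be freed with the
 * same size it was charged with, since sock_kfree_s() only subtracts the
 * value it is given from sk_omem_alloc.
 */
static int example_set_option(struct sock *sk, int optlen)
{
	void *opt = sock_kmalloc(sk, optlen, GFP_KERNEL);

	if (!opt)
		return -ENOBUFS;
	/* ... copy in and apply the option here ... */
	sock_kfree_s(sk, opt, optlen);
	return 0;
}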
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
1620/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1621 I think these locks should be removed for datagram sockets.
1622 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001623static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624{
1625 DEFINE_WAIT(wait);
1626
1627 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1628 for (;;) {
1629 if (!timeo)
1630 break;
1631 if (signal_pending(current))
1632 break;
1633 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001634 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1636 break;
1637 if (sk->sk_shutdown & SEND_SHUTDOWN)
1638 break;
1639 if (sk->sk_err)
1640 break;
1641 timeo = schedule_timeout(timeo);
1642 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001643 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 return timeo;
1645}
1646
1647
1648/*
1649 * Generic send/receive buffer handlers
1650 */
1651
Herbert Xu4cc7f682009-02-04 16:55:54 -08001652struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1653 unsigned long data_len, int noblock,
1654 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655{
1656 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001657 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 long timeo;
1659 int err;
Jason Wangcc9b17a2012-05-30 21:18:10 +00001660 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1661
1662 err = -EMSGSIZE;
1663 if (npages > MAX_SKB_FRAGS)
1664 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665
1666 gfp_mask = sk->sk_allocation;
1667 if (gfp_mask & __GFP_WAIT)
1668 gfp_mask |= __GFP_REPEAT;
1669
1670 timeo = sock_sndtimeo(sk, noblock);
1671 while (1) {
1672 err = sock_error(sk);
1673 if (err != 0)
1674 goto failure;
1675
1676 err = -EPIPE;
1677 if (sk->sk_shutdown & SEND_SHUTDOWN)
1678 goto failure;
1679
1680 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001681 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 if (skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 int i;
1684
1685 /* No pages, we're done... */
1686 if (!data_len)
1687 break;
1688
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 skb->truesize += data_len;
1690 skb_shinfo(skb)->nr_frags = npages;
1691 for (i = 0; i < npages; i++) {
1692 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
1694 page = alloc_pages(sk->sk_allocation, 0);
1695 if (!page) {
1696 err = -ENOBUFS;
1697 skb_shinfo(skb)->nr_frags = i;
1698 kfree_skb(skb);
1699 goto failure;
1700 }
1701
Ian Campbellea2ab692011-08-22 23:44:58 +00001702 __skb_fill_page_desc(skb, i,
1703 page, 0,
1704 (data_len >= PAGE_SIZE ?
1705 PAGE_SIZE :
1706 data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 data_len -= PAGE_SIZE;
1708 }
1709
1710 /* Full success... */
1711 break;
1712 }
1713 err = -ENOBUFS;
1714 goto failure;
1715 }
1716 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1717 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1718 err = -EAGAIN;
1719 if (!timeo)
1720 goto failure;
1721 if (signal_pending(current))
1722 goto interrupted;
1723 timeo = sock_wait_for_wmem(sk, timeo);
1724 }
1725
1726 skb_set_owner_w(skb, sk);
1727 return skb;
1728
1729interrupted:
1730 err = sock_intr_errno(timeo);
1731failure:
1732 *errcode = err;
1733 return NULL;
1734}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001735EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001737struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 int noblock, int *errcode)
1739{
1740 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1741}
Eric Dumazet2a915252009-05-27 11:30:05 +00001742EXPORT_SYMBOL(sock_alloc_send_skb);
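/*
 * Usage sketch (hypothetical helper and header length): a simplified
 * datagram send path. sock_alloc_send_skb() waits, subject to the send
 * timeout, until the write buffer has room, and reports failures through
 * *err.
 */
static struct sk_buff *example_xmit_alloc(struct sock *sk, unsigned int hdr_len,
					  size_t len, int noblock, int *err)
{
	return sock_alloc_send_skb(sk, hdr_len + len, noblock, err);
}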
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743
1744static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001745 __releases(&sk->sk_lock.slock)
1746 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747{
1748 DEFINE_WAIT(wait);
1749
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001750 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1752 TASK_UNINTERRUPTIBLE);
1753 spin_unlock_bh(&sk->sk_lock.slock);
1754 schedule();
1755 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001756 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 break;
1758 }
1759 finish_wait(&sk->sk_lock.wq, &wait);
1760}
1761
1762static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001763 __releases(&sk->sk_lock.slock)
1764 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765{
1766 struct sk_buff *skb = sk->sk_backlog.head;
1767
1768 do {
1769 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1770 bh_unlock_sock(sk);
1771
1772 do {
1773 struct sk_buff *next = skb->next;
1774
Eric Dumazete4cbb022012-04-30 16:07:09 +00001775 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001776 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001778 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
1780 /*
1781 * We are in process context here with softirqs
1782 * disabled, use cond_resched_softirq() to preempt.
1783 * This is safe to do because we've taken the backlog
1784 * queue private:
1785 */
1786 cond_resched_softirq();
1787
1788 skb = next;
1789 } while (skb != NULL);
1790
1791 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001792 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001793
1794 /*
 1795 * Doing the zeroing here guarantees we cannot loop forever
1796 * while a wild producer attempts to flood us.
1797 */
1798 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799}
1800
1801/**
1802 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001803 * @sk: sock to wait on
1804 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 *
 1806 * Now socket state including sk->sk_err is changed only under the lock,
 1807 * hence we may omit checks after joining the wait queue.
 1808 * We check the receive queue before schedule() only as an optimization;
 1809 * it is very likely that release_sock() added new data.
1810 */
1811int sk_wait_data(struct sock *sk, long *timeo)
1812{
1813 int rc;
1814 DEFINE_WAIT(wait);
1815
Eric Dumazetaa395142010-04-20 13:03:51 +00001816 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1818 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1819 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001820 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 return rc;
1822}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823EXPORT_SYMBOL(sk_wait_data);
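/*
 * Usage sketch (hypothetical helper): the usual receive-side wait loop,
 * called with the socket lock held. sk_wait_data() drops the lock while
 * sleeping, re-takes it before returning and updates *timeo with the
 * time remaining.
 */
static int example_wait_for_input(struct sock *sk, long *timeo)
{
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!*timeo)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo);
		sk_wait_data(sk, timeo);
	}
	return 0;
}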
1824
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001825/**
1826 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1827 * @sk: socket
1828 * @size: memory size to allocate
1829 * @kind: allocation type
1830 *
1831 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1832 * rmem allocation. This function assumes that protocols which have
1833 * memory_pressure use sk_wmem_queued as write buffer accounting.
1834 */
1835int __sk_mem_schedule(struct sock *sk, int size, int kind)
1836{
1837 struct proto *prot = sk->sk_prot;
1838 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001839 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001840 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001841
1842 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001843
Glauber Costae1aab162011-12-11 21:47:03 +00001844 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001845
1846 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001847 if (parent_status == UNDER_LIMIT &&
1848 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001849 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001850 return 1;
1851 }
1852
Glauber Costae1aab162011-12-11 21:47:03 +00001853 /* Under pressure. (we or our parents) */
1854 if ((parent_status > SOFT_LIMIT) ||
1855 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001856 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001857
Glauber Costae1aab162011-12-11 21:47:03 +00001858 /* Over hard limit (we or our parents) */
1859 if ((parent_status == OVER_LIMIT) ||
1860 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001861 goto suppress_allocation;
1862
1863 /* guarantee minimum buffer size under pressure */
1864 if (kind == SK_MEM_RECV) {
1865 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1866 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001867
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001868 } else { /* SK_MEM_SEND */
1869 if (sk->sk_type == SOCK_STREAM) {
1870 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1871 return 1;
1872 } else if (atomic_read(&sk->sk_wmem_alloc) <
1873 prot->sysctl_wmem[0])
1874 return 1;
1875 }
1876
Glauber Costa180d8cd2011-12-11 21:47:02 +00001877 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08001878 int alloc;
1879
Glauber Costa180d8cd2011-12-11 21:47:02 +00001880 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08001881 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001882 alloc = sk_sockets_allocated_read_positive(sk);
1883 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001884 sk_mem_pages(sk->sk_wmem_queued +
1885 atomic_read(&sk->sk_rmem_alloc) +
1886 sk->sk_forward_alloc))
1887 return 1;
1888 }
1889
1890suppress_allocation:
1891
1892 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1893 sk_stream_moderate_sndbuf(sk);
1894
1895 /* Fail only if socket is _under_ its sndbuf.
 1896 * In this case we cannot block, so we have to fail.
1897 */
1898 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1899 return 1;
1900 }
1901
Satoru Moriya3847ce32011-06-17 12:00:03 +00001902 trace_sock_exceed_buf_limit(sk, prot, allocated);
1903
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001904 /* Alas. Undo changes. */
1905 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001906
Glauber Costa0e90b312012-01-20 04:57:16 +00001907 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00001908
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001909 return 0;
1910}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001911EXPORT_SYMBOL(__sk_mem_schedule);
1912
1913/**
 1914 * __sk_mem_reclaim - reclaim memory_allocated
1915 * @sk: socket
1916 */
1917void __sk_mem_reclaim(struct sock *sk)
1918{
Glauber Costa180d8cd2011-12-11 21:47:02 +00001919 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00001920 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001921 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1922
Glauber Costa180d8cd2011-12-11 21:47:02 +00001923 if (sk_under_memory_pressure(sk) &&
1924 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1925 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001926}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001927EXPORT_SYMBOL(__sk_mem_reclaim);
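/*
 * Usage sketch (hypothetical helper): roughly what the sk_rmem_schedule()/
 * sk_mem_charge() wrappers in net/sock.h boil down to on the receive side.
 * A non-zero return from __sk_mem_schedule() means the charge fits within
 * the protocol's memory limits.
 */
static int example_account_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!__sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV))
		return -ENOBUFS;	/* over the protocol's limits */
	/* ... queue the skb; uncharge/reclaim when it is consumed ... */
	return 0;
}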
1928
1929
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930/*
1931 * Set of default routines for initialising struct proto_ops when
1932 * the protocol does not support a particular function. In certain
1933 * cases where it makes no sense for a protocol to have a "do nothing"
1934 * function, some default processing is provided.
1935 */
1936
1937int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1938{
1939 return -EOPNOTSUPP;
1940}
Eric Dumazet2a915252009-05-27 11:30:05 +00001941EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001943int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 int len, int flags)
1945{
1946 return -EOPNOTSUPP;
1947}
Eric Dumazet2a915252009-05-27 11:30:05 +00001948EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1951{
1952 return -EOPNOTSUPP;
1953}
Eric Dumazet2a915252009-05-27 11:30:05 +00001954EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
1956int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1957{
1958 return -EOPNOTSUPP;
1959}
Eric Dumazet2a915252009-05-27 11:30:05 +00001960EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001962int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 int *len, int peer)
1964{
1965 return -EOPNOTSUPP;
1966}
Eric Dumazet2a915252009-05-27 11:30:05 +00001967EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
Eric Dumazet2a915252009-05-27 11:30:05 +00001969unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970{
1971 return 0;
1972}
Eric Dumazet2a915252009-05-27 11:30:05 +00001973EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
1975int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1976{
1977 return -EOPNOTSUPP;
1978}
Eric Dumazet2a915252009-05-27 11:30:05 +00001979EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
1981int sock_no_listen(struct socket *sock, int backlog)
1982{
1983 return -EOPNOTSUPP;
1984}
Eric Dumazet2a915252009-05-27 11:30:05 +00001985EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987int sock_no_shutdown(struct socket *sock, int how)
1988{
1989 return -EOPNOTSUPP;
1990}
Eric Dumazet2a915252009-05-27 11:30:05 +00001991EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
1993int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001994 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995{
1996 return -EOPNOTSUPP;
1997}
Eric Dumazet2a915252009-05-27 11:30:05 +00001998EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
2000int sock_no_getsockopt(struct socket *sock, int level, int optname,
2001 char __user *optval, int __user *optlen)
2002{
2003 return -EOPNOTSUPP;
2004}
Eric Dumazet2a915252009-05-27 11:30:05 +00002005EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
2007int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2008 size_t len)
2009{
2010 return -EOPNOTSUPP;
2011}
Eric Dumazet2a915252009-05-27 11:30:05 +00002012EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
2014int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2015 size_t len, int flags)
2016{
2017 return -EOPNOTSUPP;
2018}
Eric Dumazet2a915252009-05-27 11:30:05 +00002019EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020
2021int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2022{
2023 /* Mirror missing mmap method error code */
2024 return -ENODEV;
2025}
Eric Dumazet2a915252009-05-27 11:30:05 +00002026EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
2028ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2029{
2030 ssize_t res;
2031 struct msghdr msg = {.msg_flags = flags};
2032 struct kvec iov;
2033 char *kaddr = kmap(page);
2034 iov.iov_base = kaddr + offset;
2035 iov.iov_len = size;
2036 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2037 kunmap(page);
2038 return res;
2039}
Eric Dumazet2a915252009-05-27 11:30:05 +00002040EXPORT_SYMBOL(sock_no_sendpage);
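/*
 * Usage sketch (hypothetical ops table): a protocol that does not support
 * every operation can point the unsupported slots at the sock_no_*() stubs
 * instead of leaving them NULL. Only a few fields of struct proto_ops are
 * shown; a real table also supplies release, bind, connect, sendmsg,
 * recvmsg and friends.
 */
static const struct proto_ops example_dgram_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,	/* connectionless */
	.listen		= sock_no_listen,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};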
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
2042/*
2043 * Default Socket Callbacks
2044 */
2045
2046static void sock_def_wakeup(struct sock *sk)
2047{
Eric Dumazet43815482010-04-29 11:01:49 +00002048 struct socket_wq *wq;
2049
2050 rcu_read_lock();
2051 wq = rcu_dereference(sk->sk_wq);
2052 if (wq_has_sleeper(wq))
2053 wake_up_interruptible_all(&wq->wait);
2054 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055}
2056
2057static void sock_def_error_report(struct sock *sk)
2058{
Eric Dumazet43815482010-04-29 11:01:49 +00002059 struct socket_wq *wq;
2060
2061 rcu_read_lock();
2062 wq = rcu_dereference(sk->sk_wq);
2063 if (wq_has_sleeper(wq))
2064 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002065 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002066 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067}
2068
2069static void sock_def_readable(struct sock *sk, int len)
2070{
Eric Dumazet43815482010-04-29 11:01:49 +00002071 struct socket_wq *wq;
2072
2073 rcu_read_lock();
2074 wq = rcu_dereference(sk->sk_wq);
2075 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002076 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002077 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002078 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002079 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080}
2081
2082static void sock_def_write_space(struct sock *sk)
2083{
Eric Dumazet43815482010-04-29 11:01:49 +00002084 struct socket_wq *wq;
2085
2086 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
2088 /* Do not wake up a writer until he can make "significant"
2089 * progress. --DaveM
2090 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002091 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002092 wq = rcu_dereference(sk->sk_wq);
2093 if (wq_has_sleeper(wq))
2094 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002095 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
2097 /* Should agree with poll, otherwise some programs break */
2098 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002099 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 }
2101
Eric Dumazet43815482010-04-29 11:01:49 +00002102 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103}
2104
2105static void sock_def_destruct(struct sock *sk)
2106{
Jesper Juhla51482b2005-11-08 09:41:34 -08002107 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108}
2109
2110void sk_send_sigurg(struct sock *sk)
2111{
2112 if (sk->sk_socket && sk->sk_socket->file)
2113 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002114 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115}
Eric Dumazet2a915252009-05-27 11:30:05 +00002116EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
2118void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2119 unsigned long expires)
2120{
2121 if (!mod_timer(timer, expires))
2122 sock_hold(sk);
2123}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124EXPORT_SYMBOL(sk_reset_timer);
2125
2126void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2127{
2128 if (timer_pending(timer) && del_timer(timer))
2129 __sock_put(sk);
2130}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131EXPORT_SYMBOL(sk_stop_timer);
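/*
 * Usage sketch (hypothetical helpers): arming and stopping a per-socket
 * timer. Both helpers manage the socket refcount so the sock cannot be
 * freed while the timer is pending; sk->sk_timer is assumed to have been
 * given a handler by the protocol's init code.
 */
static void example_arm_keepalive(struct sock *sk, unsigned long delay)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
}

static void example_cancel_keepalive(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}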
2132
2133void sock_init_data(struct socket *sock, struct sock *sk)
2134{
2135 skb_queue_head_init(&sk->sk_receive_queue);
2136 skb_queue_head_init(&sk->sk_write_queue);
2137 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07002138#ifdef CONFIG_NET_DMA
2139 skb_queue_head_init(&sk->sk_async_wait_queue);
2140#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
2142 sk->sk_send_head = NULL;
2143
2144 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002145
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 sk->sk_allocation = GFP_KERNEL;
2147 sk->sk_rcvbuf = sysctl_rmem_default;
2148 sk->sk_sndbuf = sysctl_wmem_default;
2149 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002150 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
2152 sock_set_flag(sk, SOCK_ZAPPED);
2153
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002154 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002156 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 sock->sk = sk;
2158 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002159 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
Eric Dumazetb6c67122010-04-08 23:03:29 +00002161 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002163 lockdep_set_class_and_name(&sk->sk_callback_lock,
2164 af_callback_keys + sk->sk_family,
2165 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
2167 sk->sk_state_change = sock_def_wakeup;
2168 sk->sk_data_ready = sock_def_readable;
2169 sk->sk_write_space = sock_def_write_space;
2170 sk->sk_error_report = sock_def_error_report;
2171 sk->sk_destruct = sock_def_destruct;
2172
2173 sk->sk_sndmsg_page = NULL;
2174 sk->sk_sndmsg_off = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002175 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002177 sk->sk_peer_pid = NULL;
2178 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 sk->sk_write_pending = 0;
2180 sk->sk_rcvlowat = 1;
2181 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2182 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2183
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002184 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002186 /*
2187 * Before updating sk_refcnt, we must commit prior changes to memory
2188 * (Documentation/RCU/rculist_nulls.txt for details)
2189 */
2190 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002192 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193}
Eric Dumazet2a915252009-05-27 11:30:05 +00002194EXPORT_SYMBOL(sock_init_data);
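/*
 * Usage sketch (hypothetical callback): a protocol calls sock_init_data()
 * from its create/init hook and then overrides the default callbacks it
 * cares about; example_data_ready() has the same signature as
 * sock_def_readable() above.
 */
static void example_data_ready(struct sock *sk, int len)
{
	/* protocol-specific wakeup, e.g. kick a worker */
}

static void example_init_sock(struct socket *sock, struct sock *sk)
{
	sock_init_data(sock, sk);
	sk->sk_data_ready = example_data_ready;
}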
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002196void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197{
2198 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002199 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002200 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002202 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002203 spin_unlock(&sk->sk_lock.slock);
2204 /*
2205 * The sk_lock has mutex_lock() semantics here:
2206 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002207 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002208 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002210EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002212void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002214 /*
2215 * The sk_lock has mutex_unlock() semantics:
2216 */
2217 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2218
2219 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 if (sk->sk_backlog.tail)
2221 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002222
2223 if (sk->sk_prot->release_cb)
2224 sk->sk_prot->release_cb(sk);
2225
John Heffnerd2e91172007-09-12 10:44:19 +02002226 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002227 if (waitqueue_active(&sk->sk_lock.wq))
2228 wake_up(&sk->sk_lock.wq);
2229 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230}
2231EXPORT_SYMBOL(release_sock);
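/*
 * Usage sketch (hypothetical helper): the standard process-context pattern.
 * While the lock is owned, softirq input is parked on the backlog and is
 * flushed by __release_sock() when release_sock() runs.
 */
static void example_locked_update(struct sock *sk)
{
	lock_sock(sk);
	/* ... modify state that the receive path also looks at ... */
	release_sock(sk);
}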
2232
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002233/**
2234 * lock_sock_fast - fast version of lock_sock
2235 * @sk: socket
2236 *
 2237 * This version should be used for very small sections, where the process won't block:
 2238 * returns false if the fast path is taken,
 2239 * sk_lock.slock locked, owned = 0, BH disabled;
 2240 * returns true if the slow path is taken,
 2241 * sk_lock.slock unlocked, owned = 1, BH enabled.
2242 */
2243bool lock_sock_fast(struct sock *sk)
2244{
2245 might_sleep();
2246 spin_lock_bh(&sk->sk_lock.slock);
2247
2248 if (!sk->sk_lock.owned)
2249 /*
2250 * Note : We must disable BH
2251 */
2252 return false;
2253
2254 __lock_sock(sk);
2255 sk->sk_lock.owned = 1;
2256 spin_unlock(&sk->sk_lock.slock);
2257 /*
2258 * The sk_lock has mutex_lock() semantics here:
2259 */
2260 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2261 local_bh_enable();
2262 return true;
2263}
2264EXPORT_SYMBOL(lock_sock_fast);
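/*
 * Usage sketch (hypothetical helper): lock_sock_fast() pairs with
 * unlock_sock_fast(), which takes the returned value so it knows whether
 * the slow (owned) or the fast (spinlock-only) path has to be undone.
 * Only suitable for short sections that never sleep.
 */
static void example_quick_peek(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... a few non-sleeping reads or updates of socket state ... */
	unlock_sock_fast(sk, slow);
}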
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002267{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002268 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002270 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002271 tv = ktime_to_timeval(sk->sk_stamp);
2272 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002274 if (tv.tv_sec == 0) {
2275 sk->sk_stamp = ktime_get_real();
2276 tv = ktime_to_timeval(sk->sk_stamp);
2277 }
2278 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002279}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280EXPORT_SYMBOL(sock_get_timestamp);
2281
Eric Dumazetae40eb12007-03-18 17:33:16 -07002282int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2283{
2284 struct timespec ts;
2285 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002286 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002287 ts = ktime_to_timespec(sk->sk_stamp);
2288 if (ts.tv_sec == -1)
2289 return -ENOENT;
2290 if (ts.tv_sec == 0) {
2291 sk->sk_stamp = ktime_get_real();
2292 ts = ktime_to_timespec(sk->sk_stamp);
2293 }
2294 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2295}
2296EXPORT_SYMBOL(sock_get_timestampns);
2297
Patrick Ohly20d49472009-02-12 05:03:38 +00002298void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002299{
Patrick Ohly20d49472009-02-12 05:03:38 +00002300 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002301 unsigned long previous_flags = sk->sk_flags;
2302
Patrick Ohly20d49472009-02-12 05:03:38 +00002303 sock_set_flag(sk, flag);
2304 /*
2305 * we just set one of the two flags which require net
2306 * time stamping, but time stamping might have been on
2307 * already because of the other one
2308 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002309 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002310 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 }
2312}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
2314/*
 2315 * Get a socket option on a socket.
2316 *
2317 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2318 * asynchronous errors should be reported by getsockopt. We assume
 2319 * this means if you specify SO_ERROR (otherwise what's the point of it).
2320 */
2321int sock_common_getsockopt(struct socket *sock, int level, int optname,
2322 char __user *optval, int __user *optlen)
2323{
2324 struct sock *sk = sock->sk;
2325
2326 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2327}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328EXPORT_SYMBOL(sock_common_getsockopt);
2329
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002330#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002331int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2332 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002333{
2334 struct sock *sk = sock->sk;
2335
Johannes Berg1e51f952007-03-06 13:44:06 -08002336 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002337 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2338 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002339 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2340}
2341EXPORT_SYMBOL(compat_sock_common_getsockopt);
2342#endif
2343
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2345 struct msghdr *msg, size_t size, int flags)
2346{
2347 struct sock *sk = sock->sk;
2348 int addr_len = 0;
2349 int err;
2350
2351 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2352 flags & ~MSG_DONTWAIT, &addr_len);
2353 if (err >= 0)
2354 msg->msg_namelen = addr_len;
2355 return err;
2356}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357EXPORT_SYMBOL(sock_common_recvmsg);
2358
2359/*
2360 * Set socket options on an inet socket.
2361 */
2362int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002363 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364{
2365 struct sock *sk = sock->sk;
2366
2367 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2368}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369EXPORT_SYMBOL(sock_common_setsockopt);
2370
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002371#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002372int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002373 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002374{
2375 struct sock *sk = sock->sk;
2376
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002377 if (sk->sk_prot->compat_setsockopt != NULL)
2378 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2379 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002380 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2381}
2382EXPORT_SYMBOL(compat_sock_common_setsockopt);
2383#endif
2384
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385void sk_common_release(struct sock *sk)
2386{
2387 if (sk->sk_prot->destroy)
2388 sk->sk_prot->destroy(sk);
2389
2390 /*
2391 * Observation: when sock_common_release is called, processes have
2392 * no access to socket. But net still has.
2393 * Step one, detach it from networking:
2394 *
2395 * A. Remove from hash tables.
2396 */
2397
2398 sk->sk_prot->unhash(sk);
2399
2400 /*
 2401 * At this point the socket cannot receive new packets, but it is possible
 2402 * that some packets are in flight because some CPU runs the receiver and
 2403 * did the hash table lookup before we unhashed the socket. They will reach
 2404 * the receive queue and will be purged by the socket destructor.
2405 *
 2406 * Also, we still have packets pending in the receive queue and probably
 2407 * our own packets waiting in device queues. sock_destroy will drain the
 2408 * receive queue, but transmitted packets will delay socket destruction
 2409 * until the last reference is released.
2410 */
2411
2412 sock_orphan(sk);
2413
2414 xfrm_sk_free_policy(sk);
2415
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002416 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 sock_put(sk);
2418}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419EXPORT_SYMBOL(sk_common_release);
2420
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002421#ifdef CONFIG_PROC_FS
2422#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002423struct prot_inuse {
2424 int val[PROTO_INUSE_NR];
2425};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002426
2427static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002428
2429#ifdef CONFIG_NET_NS
2430void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2431{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002432 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002433}
2434EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2435
2436int sock_prot_inuse_get(struct net *net, struct proto *prot)
2437{
2438 int cpu, idx = prot->inuse_idx;
2439 int res = 0;
2440
2441 for_each_possible_cpu(cpu)
2442 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2443
2444 return res >= 0 ? res : 0;
2445}
2446EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2447
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002448static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002449{
2450 net->core.inuse = alloc_percpu(struct prot_inuse);
2451 return net->core.inuse ? 0 : -ENOMEM;
2452}
2453
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002454static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002455{
2456 free_percpu(net->core.inuse);
2457}
2458
2459static struct pernet_operations net_inuse_ops = {
2460 .init = sock_inuse_init_net,
2461 .exit = sock_inuse_exit_net,
2462};
2463
2464static __init int net_inuse_init(void)
2465{
2466 if (register_pernet_subsys(&net_inuse_ops))
2467 panic("Cannot initialize net inuse counters");
2468
2469 return 0;
2470}
2471
2472core_initcall(net_inuse_init);
2473#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002474static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2475
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002476void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002477{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002478 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002479}
2480EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2481
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002482int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002483{
2484 int cpu, idx = prot->inuse_idx;
2485 int res = 0;
2486
2487 for_each_possible_cpu(cpu)
2488 res += per_cpu(prot_inuse, cpu).val[idx];
2489
2490 return res >= 0 ? res : 0;
2491}
2492EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002493#endif
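/*
 * Usage sketch (hypothetical callbacks): protocols bump these counters from
 * their hash/unhash methods so the per-namespace socket counts reported
 * through /proc stay accurate.
 */
static void example_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* ... remove sk from the lookup table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}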
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002494
2495static void assign_proto_idx(struct proto *prot)
2496{
2497 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2498
2499 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002500 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002501 return;
2502 }
2503
2504 set_bit(prot->inuse_idx, proto_inuse_idx);
2505}
2506
2507static void release_proto_idx(struct proto *prot)
2508{
2509 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2510 clear_bit(prot->inuse_idx, proto_inuse_idx);
2511}
2512#else
2513static inline void assign_proto_idx(struct proto *prot)
2514{
2515}
2516
2517static inline void release_proto_idx(struct proto *prot)
2518{
2519}
2520#endif
2521
int proto_register(struct proto *prot, int alloc_slab)
{
        if (alloc_slab) {
                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
                                        NULL);

                if (prot->slab == NULL) {
                        pr_crit("%s: Can't create sock SLAB cache!\n",
                                prot->name);
                        goto out;
                }

                if (prot->rsk_prot != NULL) {
                        prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
                        if (prot->rsk_prot->slab_name == NULL)
                                goto out_free_sock_slab;

                        prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
                                                                 prot->rsk_prot->obj_size, 0,
                                                                 SLAB_HWCACHE_ALIGN, NULL);

                        if (prot->rsk_prot->slab == NULL) {
                                pr_crit("%s: Can't create request sock SLAB cache!\n",
                                        prot->name);
                                goto out_free_request_sock_slab_name;
                        }
                }

                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

                        if (prot->twsk_prot->twsk_slab_name == NULL)
                                goto out_free_request_sock_slab;

                        prot->twsk_prot->twsk_slab =
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
                                                  SLAB_HWCACHE_ALIGN |
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
                                goto out_free_timewait_sock_slab_name;
                }
        }

        mutex_lock(&proto_list_mutex);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
        mutex_unlock(&proto_list_mutex);
        return 0;

out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
        if (prot->rsk_prot && prot->rsk_prot->slab) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                prot->rsk_prot->slab = NULL;
        }
out_free_request_sock_slab_name:
        if (prot->rsk_prot)
                kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
out:
        return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

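/*
 * proto_unregister - remove a protocol registered with proto_register().
 *
 * Drops the protocol from proto_list, returns its inuse-counter slot and
 * destroys any slab caches created at registration time.  Callers must not
 * unregister while sockets of this protocol still exist, since destroying a
 * cache that still holds live objects is a bug.
 */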
void proto_unregister(struct proto *prot)
{
        mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
        mutex_unlock(&proto_list_mutex);

        if (prot->slab != NULL) {
                kmem_cache_destroy(prot->slab);
                prot->slab = NULL;
        }

        if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                kfree(prot->rsk_prot->slab_name);
                prot->rsk_prot->slab = NULL;
        }

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);
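
/*
 * Illustrative only -- a minimal sketch of how a protocol module might use
 * the two entry points above.  "foo_prot", "struct foo_sock" and the module
 * hooks are hypothetical names, not part of this file; a real struct
 * foo_sock would embed struct sock as its first member.
 *
 *	static struct proto foo_prot = {
 *		.name	  = "FOO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return proto_register(&foo_prot, 1);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		proto_unregister(&foo_prot);
 *	}
 */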

#ifdef CONFIG_PROC_FS
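/*
 * The remainder of this file implements /proc/net/protocols: a seq_file
 * that walks proto_list (holding proto_list_mutex for the whole traversal)
 * and prints one line per registered protocol, preceded by a header row.
 */
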
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_mutex)
{
        mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_mutex)
{
        mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
        /* "NI" means the protocol does not implement memory pressure accounting */
        return proto->memory_pressure != NULL ?
                        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

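/*
 * One /proc/net/protocols row per protocol: name, object size, sockets in
 * use in this network namespace, memory allocated (-1 if not tracked),
 * memory pressure ("NI" if not tracked), max header size, whether a slab
 * cache is used, owning module, and a y/n flag for each optional method
 * named in the header line printed by proto_seq_show().
 */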
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   sock_prot_memory_allocated(proto),
                   sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}

static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
        .show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = proto_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */