/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
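
/* Rough arithmetic, as an illustration only: SKB_TRUESIZE(256) is 256 bytes
 * of payload plus the architecture-dependent struct sk_buff and
 * skb_shared_info overhead, on the order of 800 bytes per packet on a
 * typical 64-bit build.  With _SK_MEM_PACKETS == 256 that puts the
 * SK_WMEM_MAX/SK_RMEM_MAX defaults somewhere around 200 KB; the exact
 * figure varies by platform, which is why SKB_TRUESIZE() is used here
 * instead of a hard-coded constant.
 */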

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
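
/* Worked out for illustration: with UIO_MAXIOV == 1024 and 8-byte longs this
 * default is 8 * (2 * 1024 + 512) = 20480 bytes (20 KB) on 64-bit builds,
 * and half of that on 32-bit builds.
 */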

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
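
/* Worked example (illustrative, assuming HZ == 1000): a user timeout of
 * { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2 * 1000 + (500000 + 999) / 1000 = 2500 jiffies; the microsecond part is
 * rounded up to the next tick, so the timeout never fires early.
 */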

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* We escape from the RCU-protected region here, so make sure we
	 * don't leak a non-refcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
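
/* Illustrative user-space call (not part of this file): binding a socket to
 * a single interface through this handler is simply
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * and passing an empty name (or a zero option length) removes the binding.
 * The caller needs CAP_NET_RAW, as enforced above.
 */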

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't report an error here; BSD doesn't, and if you think
		 * about it, this is right.  Otherwise apps would have to
		 * play 'guess the biggest size' games.  RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't report an error here; BSD doesn't, and if you think
		 * about it, this is right.  Otherwise apps would have to
		 * play 'guess the biggest size' games.  RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
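
/* Illustration of the doubling above (not part of the kernel code): after a
 * user-space sequence such as
 *
 *	int v = 65536;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &v, sizeof(v));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &v, &len);
 *
 * v reads back as 131072, twice the requested size, because the kernel
 * reserves the extra half for struct sk_buff and related bookkeeping and
 * getsockopt() reports the value actually in use (assuming 65536 is within
 * sysctl_rmem_max; otherwise the request is clamped first).
 */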


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid(current_ns, cred->euid);
		ucred->gid = from_kgid(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups.  sk_node should also be left
 * as-is.  We must not copy the fields between sk_dontcopy_begin and
 * sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * Caches using SLAB_DESTROY_BY_RCU must leave the .next pointer of the
 * nulls nodes unmodified, so special care is taken when initializing the
 * object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}
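
/* Rough sketch of the layout handled below (offsets are illustrative only):
 *
 *	[ 0 .. skc_node.next )			zeroed
 *	[ skc_node.next ]			preserved (RCU nulls pointer)
 *	( .. skc_portaddr_node.next )		zeroed
 *	[ skc_portaddr_node.next ]		preserved (RCU nulls pointer)
 *	( .. obj_size )				zeroed
 *
 * i.e. everything is cleared except the two list 'next' pointers that
 * concurrent RCU lookups may still dereference.
 */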

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);

void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
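
/* Typical use, for illustration only: a protocol family's create() handler
 * allocates its socket roughly like
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &tcp_prot);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * sk_prot_alloc() zeroes the object because of __GFP_ZERO above, and
 * sk_free() below is the matching release path once sk_wmem_alloc drops
 * its initial reference.
 */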

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc, which tells us whether
	 * some packets are still in some tx queue.
	 * If it is not zero, sock_wfree() will call __sk_free(sk) later.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop the reference to sk->sk_net.  It has
 * already been dropped in sk_change_net, and taking a reference to the
 * stopping namespace is not an option.
 * Take a reference to the socket to remove it from the hash while it is
 * still _alive_, and after that destroy it in the context of init_net.
 */
1271void sk_release_kernel(struct sock *sk)
1272{
1273 if (sk == NULL || sk->sk_socket == NULL)
1274 return;
1275
1276 sock_hold(sk);
1277 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001278 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001279 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001280 sock_put(sk);
1281}
David S. Miller45af1752008-02-29 11:33:19 -08001282EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001283
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001284static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1285{
1286 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1287 sock_update_memcg(newsk);
1288}
1289
Eric Dumazete56c57d2011-11-08 17:07:07 -05001290/**
1291 * sk_clone_lock - clone a socket, and lock its clone
1292 * @sk: the socket to clone
1293 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1294 *
1295 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1296 */
1297struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001298{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001299 struct sock *newsk;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001300
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001301 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001302 if (newsk != NULL) {
1303 struct sk_filter *filter;
1304
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001305 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001306
1307 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001308 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001309 sk_node_init(&newsk->sk_node);
1310 sock_lock_init(newsk);
1311 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001312 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001313 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001314
1315 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001316 /*
1317 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1318 */
1319 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001320 atomic_set(&newsk->sk_omem_alloc, 0);
1321 skb_queue_head_init(&newsk->sk_receive_queue);
1322 skb_queue_head_init(&newsk->sk_write_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001323#ifdef CONFIG_NET_DMA
1324 skb_queue_head_init(&newsk->sk_async_wait_queue);
1325#endif
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001326
Eric Dumazetb6c67122010-04-08 23:03:29 +00001327 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001328 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001329 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1330 af_callback_keys + newsk->sk_family,
1331 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001332
1333 newsk->sk_dst_cache = NULL;
1334 newsk->sk_wmem_queued = 0;
1335 newsk->sk_forward_alloc = 0;
1336 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001337 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1338
1339 sock_reset_flag(newsk, SOCK_DONE);
1340 skb_queue_head_init(&newsk->sk_error_queue);
1341
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001342 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001343 if (filter != NULL)
1344 sk_filter_charge(newsk, filter);
1345
1346 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1347 /* It is still a raw copy of the parent, so invalidate
1348 * the destructor and do a plain sk_free() */
1349 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001350 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001351 sk_free(newsk);
1352 newsk = NULL;
1353 goto out;
1354 }
1355
1356 newsk->sk_err = 0;
1357 newsk->sk_priority = 0;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001358 /*
1359 * Before updating sk_refcnt, we must commit prior changes to memory
1360 * (Documentation/RCU/rculist_nulls.txt for details)
1361 */
1362 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001363 atomic_set(&newsk->sk_refcnt, 2);
1364
1365 /*
1366 * Increment the counter in the same struct proto as the master
1367 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1368 * is the same as sk->sk_prot->socks, as this field was copied
1369 * with memcpy).
1370 *
1371 * This _changes_ the previous behaviour, where
1372 * tcp_create_openreq_child always was incrementing the
1373 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1374 * to be taken into account in all callers. -acme
1375 */
1376 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001377 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001378 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001379
Glauber Costaf3f511e2012-01-05 20:16:39 +00001380 sk_update_clone(sk, newsk);
1381
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001382 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001383 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001384
Eric Dumazet08e29af2011-11-28 12:04:18 +00001385 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001386 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001387 }
1388out:
1389 return newsk;
1390}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001391EXPORT_SYMBOL_GPL(sk_clone_lock);
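/*
 * Editor's sketch, not part of the original file: typical use of
 * sk_clone_lock() from a protocol's accept/openreq path. The function name
 * and the "protocol specific setup" step are hypothetical; the point is that
 * the caller owns bh_unlock_sock() on the clone, including on its own error
 * paths after a successful clone.
 */
static struct sock *example_clone_child(const struct sock *parent)
{
	struct sock *child = sk_clone_lock(parent, GFP_ATOMIC);

	if (!child)
		return NULL;	/* clone failed, nothing to unlock */

	/* ... hypothetical protocol specific setup of the child ... */

	bh_unlock_sock(child);	/* the clone is returned locked */
	return child;
}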
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001392
Andi Kleen99580892007-04-20 17:12:43 -07001393void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1394{
1395 __sk_dst_set(sk, dst);
1396 sk->sk_route_caps = dst->dev->features;
1397 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001398 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001399 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001400 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001401 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001402 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001403 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001404 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001405 sk->sk_gso_max_size = dst->dev->gso_max_size;
1406 }
Andi Kleen99580892007-04-20 17:12:43 -07001407 }
1408}
1409EXPORT_SYMBOL_GPL(sk_setup_caps);
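/*
 * Editor's sketch, not part of the original file: protocols call
 * sk_setup_caps() once a route has been resolved, so GSO and checksum
 * capabilities follow the output device. The wrapper below is hypothetical;
 * note that the dst reference is consumed via __sk_dst_set().
 */
static void example_attach_dst(struct sock *sk, struct dst_entry *dst)
{
	sk_setup_caps(sk, dst);		/* caches dst, derives sk_route_caps */
}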
1410
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411void __init sk_init(void)
1412{
Jan Beulich44813742009-09-21 17:03:05 -07001413 if (totalram_pages <= 4096) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 sysctl_wmem_max = 32767;
1415 sysctl_rmem_max = 32767;
1416 sysctl_wmem_default = 32767;
1417 sysctl_rmem_default = 32767;
Jan Beulich44813742009-09-21 17:03:05 -07001418 } else if (totalram_pages >= 131072) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 sysctl_wmem_max = 131071;
1420 sysctl_rmem_max = 131071;
1421 }
1422}
1423
1424/*
1425 * Simple resource managers for sockets.
1426 */
1427
1428
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001429/*
1430 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 */
1432void sock_wfree(struct sk_buff *skb)
1433{
1434 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001435 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
Eric Dumazetd99927f2009-09-24 10:49:24 +00001437 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1438 /*
1439 * Keep a reference on sk_wmem_alloc; it will be released
1440 * after the sk_write_space() call
1441 */
1442 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001444 len = 1;
1445 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001446 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001447 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1448 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001449 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001450 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001451 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452}
Eric Dumazet2a915252009-05-27 11:30:05 +00001453EXPORT_SYMBOL(sock_wfree);
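/*
 * Editor's sketch, not part of the original file: sock_wfree() runs as the
 * skb destructor installed by skb_set_owner_w(), so a transmit path that
 * builds its own skb charges the socket as below. The helper name and the
 * length handling are hypothetical.
 */
static struct sk_buff *example_charge_tx_skb(struct sock *sk, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, sk->sk_allocation);

	if (!skb)
		return NULL;
	/* adds skb->truesize to sk_wmem_alloc and sets skb->destructor = sock_wfree */
	skb_set_owner_w(skb, sk);
	return skb;
}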
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001455/*
1456 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 */
1458void sock_rfree(struct sk_buff *skb)
1459{
1460 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001461 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
Eric Dumazetd361fd52010-07-10 22:45:17 +00001463 atomic_sub(len, &sk->sk_rmem_alloc);
1464 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465}
Eric Dumazet2a915252009-05-27 11:30:05 +00001466EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467
David S. Miller41063e92012-06-19 21:22:05 -07001468void sock_edemux(struct sk_buff *skb)
1469{
1470 sock_put(skb->sk);
1471}
1472EXPORT_SYMBOL(sock_edemux);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
1474int sock_i_uid(struct sock *sk)
1475{
1476 int uid;
1477
Eric Dumazetf064af12010-09-22 12:43:39 +00001478 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001480 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 return uid;
1482}
Eric Dumazet2a915252009-05-27 11:30:05 +00001483EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
1485unsigned long sock_i_ino(struct sock *sk)
1486{
1487 unsigned long ino;
1488
Eric Dumazetf064af12010-09-22 12:43:39 +00001489 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001491 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 return ino;
1493}
Eric Dumazet2a915252009-05-27 11:30:05 +00001494EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495
1496/*
1497 * Allocate a skb from the socket's send buffer.
1498 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001499struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001500 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501{
1502 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001503 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 if (skb) {
1505 skb_set_owner_w(skb, sk);
1506 return skb;
1507 }
1508 }
1509 return NULL;
1510}
Eric Dumazet2a915252009-05-27 11:30:05 +00001511EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
1513/*
1514 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001515 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001516struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001517 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518{
1519 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1520 struct sk_buff *skb = alloc_skb(size, priority);
1521 if (skb) {
1522 skb_set_owner_r(skb, sk);
1523 return skb;
1524 }
1525 }
1526 return NULL;
1527}
1528
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001529/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001531 */
Al Virodd0fc662005-10-07 07:46:04 +01001532void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533{
Eric Dumazet95c96172012-04-15 05:58:06 +00001534 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1536 void *mem;
1537 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001538 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 */
1540 atomic_add(size, &sk->sk_omem_alloc);
1541 mem = kmalloc(size, priority);
1542 if (mem)
1543 return mem;
1544 atomic_sub(size, &sk->sk_omem_alloc);
1545 }
1546 return NULL;
1547}
Eric Dumazet2a915252009-05-27 11:30:05 +00001548EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
1550/*
1551 * Free an option memory block.
1552 */
1553void sock_kfree_s(struct sock *sk, void *mem, int size)
1554{
1555 kfree(mem);
1556 atomic_sub(size, &sk->sk_omem_alloc);
1557}
Eric Dumazet2a915252009-05-27 11:30:05 +00001558EXPORT_SYMBOL(sock_kfree_s);
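/*
 * Editor's sketch, not part of the original file: option memory is charged
 * against sk_omem_alloc, so the free must quote the same size as the
 * allocation. A hypothetical setsockopt handler keeping a temporary blob:
 */
static int example_set_option_blob(struct sock *sk, char __user *optval,
				   int optlen)
{
	void *blob = sock_kmalloc(sk, optlen, GFP_KERNEL);

	if (!blob)
		return -ENOBUFS;
	if (copy_from_user(blob, optval, optlen)) {
		sock_kfree_s(sk, blob, optlen);	/* uncharges sk_omem_alloc */
		return -EFAULT;
	}
	/* ... hypothetical: hand the blob to protocol private state ... */
	sock_kfree_s(sk, blob, optlen);
	return 0;
}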
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
1560/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1561 I think these locks should be removed for datagram sockets.
1562 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001563static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564{
1565 DEFINE_WAIT(wait);
1566
1567 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1568 for (;;) {
1569 if (!timeo)
1570 break;
1571 if (signal_pending(current))
1572 break;
1573 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001574 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1576 break;
1577 if (sk->sk_shutdown & SEND_SHUTDOWN)
1578 break;
1579 if (sk->sk_err)
1580 break;
1581 timeo = schedule_timeout(timeo);
1582 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001583 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 return timeo;
1585}
1586
1587
1588/*
1589 * Generic send/receive buffer handlers
1590 */
1591
Herbert Xu4cc7f682009-02-04 16:55:54 -08001592struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1593 unsigned long data_len, int noblock,
1594 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595{
1596 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001597 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 long timeo;
1599 int err;
Jason Wangcc9b17a2012-05-30 21:18:10 +00001600 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1601
1602 err = -EMSGSIZE;
1603 if (npages > MAX_SKB_FRAGS)
1604 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
1606 gfp_mask = sk->sk_allocation;
1607 if (gfp_mask & __GFP_WAIT)
1608 gfp_mask |= __GFP_REPEAT;
1609
1610 timeo = sock_sndtimeo(sk, noblock);
1611 while (1) {
1612 err = sock_error(sk);
1613 if (err != 0)
1614 goto failure;
1615
1616 err = -EPIPE;
1617 if (sk->sk_shutdown & SEND_SHUTDOWN)
1618 goto failure;
1619
1620 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001621 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 if (skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 int i;
1624
1625 /* No pages, we're done... */
1626 if (!data_len)
1627 break;
1628
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 skb->truesize += data_len;
1630 skb_shinfo(skb)->nr_frags = npages;
1631 for (i = 0; i < npages; i++) {
1632 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
1634 page = alloc_pages(sk->sk_allocation, 0);
1635 if (!page) {
1636 err = -ENOBUFS;
1637 skb_shinfo(skb)->nr_frags = i;
1638 kfree_skb(skb);
1639 goto failure;
1640 }
1641
Ian Campbellea2ab692011-08-22 23:44:58 +00001642 __skb_fill_page_desc(skb, i,
1643 page, 0,
1644 (data_len >= PAGE_SIZE ?
1645 PAGE_SIZE :
1646 data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 data_len -= PAGE_SIZE;
1648 }
1649
1650 /* Full success... */
1651 break;
1652 }
1653 err = -ENOBUFS;
1654 goto failure;
1655 }
1656 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1657 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1658 err = -EAGAIN;
1659 if (!timeo)
1660 goto failure;
1661 if (signal_pending(current))
1662 goto interrupted;
1663 timeo = sock_wait_for_wmem(sk, timeo);
1664 }
1665
1666 skb_set_owner_w(skb, sk);
1667 return skb;
1668
1669interrupted:
1670 err = sock_intr_errno(timeo);
1671failure:
1672 *errcode = err;
1673 return NULL;
1674}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001675EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001677struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 int noblock, int *errcode)
1679{
1680 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1681}
Eric Dumazet2a915252009-05-27 11:30:05 +00001682EXPORT_SYMBOL(sock_alloc_send_skb);
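/*
 * Editor's sketch, not part of the original file: a datagram sendmsg path
 * typically lets sock_alloc_send_skb() handle send-buffer accounting and
 * blocking, then reserves header room itself. The helper name and the
 * reserve/copy steps are hypothetical.
 */
static struct sk_buff *example_dgram_alloc(struct sock *sk, size_t len,
					   int noblock, int *errp)
{
	struct sk_buff *skb = sock_alloc_send_skb(sk, len + MAX_HEADER,
						  noblock, errp);

	if (!skb)
		return NULL;		/* *errp holds -EAGAIN, -EPIPE, ... */
	skb_reserve(skb, MAX_HEADER);
	/* ... hypothetical: copy the payload from the iovec into the skb ... */
	return skb;
}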
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
1684static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001685 __releases(&sk->sk_lock.slock)
1686 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687{
1688 DEFINE_WAIT(wait);
1689
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001690 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1692 TASK_UNINTERRUPTIBLE);
1693 spin_unlock_bh(&sk->sk_lock.slock);
1694 schedule();
1695 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001696 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 break;
1698 }
1699 finish_wait(&sk->sk_lock.wq, &wait);
1700}
1701
1702static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001703 __releases(&sk->sk_lock.slock)
1704 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705{
1706 struct sk_buff *skb = sk->sk_backlog.head;
1707
1708 do {
1709 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1710 bh_unlock_sock(sk);
1711
1712 do {
1713 struct sk_buff *next = skb->next;
1714
Eric Dumazete4cbb022012-04-30 16:07:09 +00001715 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001716 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001718 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719
1720 /*
1721 * We are in process context here with softirqs
1722 * disabled, so use cond_resched_softirq() to allow preemption.
1723 * This is safe to do because we've taken the backlog
1724 * queue private:
1725 */
1726 cond_resched_softirq();
1727
1728 skb = next;
1729 } while (skb != NULL);
1730
1731 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001732 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001733
1734 /*
1735 * Doing the zeroing here guarantees we cannot loop forever
1736 * while a wild producer attempts to flood us.
1737 */
1738 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739}
1740
1741/**
1742 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001743 * @sk: sock to wait on
1744 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 *
1746 * Now socket state including sk->sk_err is changed only under lock,
1747 * hence we may omit checks after joining the wait queue.
1748 * We check the receive queue before schedule() only as an optimization;
1749 * it is very likely that release_sock() added new data.
1750 */
1751int sk_wait_data(struct sock *sk, long *timeo)
1752{
1753 int rc;
1754 DEFINE_WAIT(wait);
1755
Eric Dumazetaa395142010-04-20 13:03:51 +00001756 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1758 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1759 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001760 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 return rc;
1762}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763EXPORT_SYMBOL(sk_wait_data);
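/*
 * Editor's sketch, not part of the original file: sk_wait_data() expects the
 * socket lock to be held; sk_wait_event() drops and retakes it around
 * schedule(). A hypothetical blocking receive loop built on it:
 */
static struct sk_buff *example_wait_for_skb(struct sock *sk, int noblock,
					    int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	lock_sock(sk);
	while ((skb = skb_dequeue(&sk->sk_receive_queue)) == NULL) {
		*err = sock_error(sk);
		if (*err)
			break;
		*err = -EAGAIN;
		if (!timeo)
			break;
		*err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		sk_wait_data(sk, &timeo);
	}
	release_sock(sk);
	return skb;
}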
1764
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001765/**
1766 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1767 * @sk: socket
1768 * @size: memory size to allocate
1769 * @kind: allocation type
1770 *
1771 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1772 * rmem allocation. This function assumes that protocols which have
1773 * memory_pressure use sk_wmem_queued as write buffer accounting.
1774 */
1775int __sk_mem_schedule(struct sock *sk, int size, int kind)
1776{
1777 struct proto *prot = sk->sk_prot;
1778 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001779 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001780 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001781
1782 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001783
Glauber Costae1aab162011-12-11 21:47:03 +00001784 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001785
1786 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001787 if (parent_status == UNDER_LIMIT &&
1788 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001789 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001790 return 1;
1791 }
1792
Glauber Costae1aab162011-12-11 21:47:03 +00001793 /* Under pressure. (we or our parents) */
1794 if ((parent_status > SOFT_LIMIT) ||
1795 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001796 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001797
Glauber Costae1aab162011-12-11 21:47:03 +00001798 /* Over hard limit (we or our parents) */
1799 if ((parent_status == OVER_LIMIT) ||
1800 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001801 goto suppress_allocation;
1802
1803 /* guarantee minimum buffer size under pressure */
1804 if (kind == SK_MEM_RECV) {
1805 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1806 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001807
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001808 } else { /* SK_MEM_SEND */
1809 if (sk->sk_type == SOCK_STREAM) {
1810 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1811 return 1;
1812 } else if (atomic_read(&sk->sk_wmem_alloc) <
1813 prot->sysctl_wmem[0])
1814 return 1;
1815 }
1816
Glauber Costa180d8cd2011-12-11 21:47:02 +00001817 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08001818 int alloc;
1819
Glauber Costa180d8cd2011-12-11 21:47:02 +00001820 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08001821 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001822 alloc = sk_sockets_allocated_read_positive(sk);
1823 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001824 sk_mem_pages(sk->sk_wmem_queued +
1825 atomic_read(&sk->sk_rmem_alloc) +
1826 sk->sk_forward_alloc))
1827 return 1;
1828 }
1829
1830suppress_allocation:
1831
1832 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1833 sk_stream_moderate_sndbuf(sk);
1834
1835 /* Fail only if socket is _under_ its sndbuf.
1836 * In this case we cannot block, so that we have to fail.
1837 */
1838 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1839 return 1;
1840 }
1841
Satoru Moriya3847ce32011-06-17 12:00:03 +00001842 trace_sock_exceed_buf_limit(sk, prot, allocated);
1843
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001844 /* Alas. Undo changes. */
1845 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001846
Glauber Costa0e90b312012-01-20 04:57:16 +00001847 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00001848
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001849 return 0;
1850}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001851EXPORT_SYMBOL(__sk_mem_schedule);
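/*
 * Editor's sketch, not part of the original file: protocols normally reach
 * __sk_mem_schedule() through the sk_rmem_schedule()/sk_wmem_schedule()
 * inlines in net/sock.h (helper names assumed), which short-circuit when
 * sk_forward_alloc already covers the request. A hypothetical receive path
 * charging an skb before queueing it:
 */
static int example_charge_rmem(struct sock *sk, struct sk_buff *skb)
{
	int size = skb->truesize;

	if (size > sk->sk_forward_alloc &&
	    !__sk_mem_schedule(sk, size, SK_MEM_RECV))
		return -ENOBUFS;
	/* consumes forward_alloc and charges sk_rmem_alloc */
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}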
1852
1853/**
1854 * __sk_mem_reclaim - reclaim memory_allocated
1855 * @sk: socket
1856 */
1857void __sk_mem_reclaim(struct sock *sk)
1858{
Glauber Costa180d8cd2011-12-11 21:47:02 +00001859 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00001860 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001861 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1862
Glauber Costa180d8cd2011-12-11 21:47:02 +00001863 if (sk_under_memory_pressure(sk) &&
1864 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1865 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001866}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001867EXPORT_SYMBOL(__sk_mem_reclaim);
1868
1869
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870/*
1871 * Set of default routines for initialising struct proto_ops when
1872 * the protocol does not support a particular function. In certain
1873 * cases where it makes no sense for a protocol to have a "do nothing"
1874 * function, some default processing is provided.
1875 */
1876
1877int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1878{
1879 return -EOPNOTSUPP;
1880}
Eric Dumazet2a915252009-05-27 11:30:05 +00001881EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001883int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 int len, int flags)
1885{
1886 return -EOPNOTSUPP;
1887}
Eric Dumazet2a915252009-05-27 11:30:05 +00001888EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
1890int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1891{
1892 return -EOPNOTSUPP;
1893}
Eric Dumazet2a915252009-05-27 11:30:05 +00001894EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
1896int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1897{
1898 return -EOPNOTSUPP;
1899}
Eric Dumazet2a915252009-05-27 11:30:05 +00001900EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001902int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 int *len, int peer)
1904{
1905 return -EOPNOTSUPP;
1906}
Eric Dumazet2a915252009-05-27 11:30:05 +00001907EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
Eric Dumazet2a915252009-05-27 11:30:05 +00001909unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910{
1911 return 0;
1912}
Eric Dumazet2a915252009-05-27 11:30:05 +00001913EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
1915int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1916{
1917 return -EOPNOTSUPP;
1918}
Eric Dumazet2a915252009-05-27 11:30:05 +00001919EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920
1921int sock_no_listen(struct socket *sock, int backlog)
1922{
1923 return -EOPNOTSUPP;
1924}
Eric Dumazet2a915252009-05-27 11:30:05 +00001925EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
1927int sock_no_shutdown(struct socket *sock, int how)
1928{
1929 return -EOPNOTSUPP;
1930}
Eric Dumazet2a915252009-05-27 11:30:05 +00001931EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
1933int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001934 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935{
1936 return -EOPNOTSUPP;
1937}
Eric Dumazet2a915252009-05-27 11:30:05 +00001938EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939
1940int sock_no_getsockopt(struct socket *sock, int level, int optname,
1941 char __user *optval, int __user *optlen)
1942{
1943 return -EOPNOTSUPP;
1944}
Eric Dumazet2a915252009-05-27 11:30:05 +00001945EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
1947int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1948 size_t len)
1949{
1950 return -EOPNOTSUPP;
1951}
Eric Dumazet2a915252009-05-27 11:30:05 +00001952EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1955 size_t len, int flags)
1956{
1957 return -EOPNOTSUPP;
1958}
Eric Dumazet2a915252009-05-27 11:30:05 +00001959EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1962{
1963 /* Mirror missing mmap method error code */
1964 return -ENODEV;
1965}
Eric Dumazet2a915252009-05-27 11:30:05 +00001966EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1969{
1970 ssize_t res;
1971 struct msghdr msg = {.msg_flags = flags};
1972 struct kvec iov;
1973 char *kaddr = kmap(page);
1974 iov.iov_base = kaddr + offset;
1975 iov.iov_len = size;
1976 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1977 kunmap(page);
1978 return res;
1979}
Eric Dumazet2a915252009-05-27 11:30:05 +00001980EXPORT_SYMBOL(sock_no_sendpage);
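/*
 * Editor's sketch, not part of the original file: a protocol that supports
 * only part of the socket API wires the remaining proto_ops slots to the
 * stubs above and keeps real handlers (release, the data path, ...) for the
 * operations it does implement. The ops structure below is hypothetical.
 */
static const struct proto_ops example_minimal_ops = {
	.family		= PF_UNSPEC,
	.owner		= THIS_MODULE,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= sock_no_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= sock_no_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};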
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
1982/*
1983 * Default Socket Callbacks
1984 */
1985
1986static void sock_def_wakeup(struct sock *sk)
1987{
Eric Dumazet43815482010-04-29 11:01:49 +00001988 struct socket_wq *wq;
1989
1990 rcu_read_lock();
1991 wq = rcu_dereference(sk->sk_wq);
1992 if (wq_has_sleeper(wq))
1993 wake_up_interruptible_all(&wq->wait);
1994 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995}
1996
1997static void sock_def_error_report(struct sock *sk)
1998{
Eric Dumazet43815482010-04-29 11:01:49 +00001999 struct socket_wq *wq;
2000
2001 rcu_read_lock();
2002 wq = rcu_dereference(sk->sk_wq);
2003 if (wq_has_sleeper(wq))
2004 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002005 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002006 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007}
2008
2009static void sock_def_readable(struct sock *sk, int len)
2010{
Eric Dumazet43815482010-04-29 11:01:49 +00002011 struct socket_wq *wq;
2012
2013 rcu_read_lock();
2014 wq = rcu_dereference(sk->sk_wq);
2015 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002016 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002017 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002018 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002019 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020}
2021
2022static void sock_def_write_space(struct sock *sk)
2023{
Eric Dumazet43815482010-04-29 11:01:49 +00002024 struct socket_wq *wq;
2025
2026 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
2028 /* Do not wake up a writer until he can make "significant"
2029 * progress. --DaveM
2030 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002031 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002032 wq = rcu_dereference(sk->sk_wq);
2033 if (wq_has_sleeper(wq))
2034 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002035 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036
2037 /* Should agree with poll, otherwise some programs break */
2038 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002039 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 }
2041
Eric Dumazet43815482010-04-29 11:01:49 +00002042 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043}
2044
2045static void sock_def_destruct(struct sock *sk)
2046{
Jesper Juhla51482b2005-11-08 09:41:34 -08002047 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048}
2049
2050void sk_send_sigurg(struct sock *sk)
2051{
2052 if (sk->sk_socket && sk->sk_socket->file)
2053 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002054 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055}
Eric Dumazet2a915252009-05-27 11:30:05 +00002056EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2059 unsigned long expires)
2060{
2061 if (!mod_timer(timer, expires))
2062 sock_hold(sk);
2063}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064EXPORT_SYMBOL(sk_reset_timer);
2065
2066void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2067{
2068 if (timer_pending(timer) && del_timer(timer))
2069 __sock_put(sk);
2070}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071EXPORT_SYMBOL(sk_stop_timer);
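/*
 * Editor's sketch, not part of the original file: sk_reset_timer() takes a
 * socket reference when it arms a timer and sk_stop_timer() drops it when a
 * pending timer is cancelled, so the handler can rely on the sock staying
 * valid. The keepalive-style helpers below are hypothetical.
 */
static void example_arm_keepalive(struct sock *sk, unsigned long delay)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);	/* holds a ref */
}

static void example_cancel_keepalive(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);	/* drops the ref if still pending */
}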
2072
2073void sock_init_data(struct socket *sock, struct sock *sk)
2074{
2075 skb_queue_head_init(&sk->sk_receive_queue);
2076 skb_queue_head_init(&sk->sk_write_queue);
2077 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07002078#ifdef CONFIG_NET_DMA
2079 skb_queue_head_init(&sk->sk_async_wait_queue);
2080#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
2082 sk->sk_send_head = NULL;
2083
2084 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002085
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 sk->sk_allocation = GFP_KERNEL;
2087 sk->sk_rcvbuf = sysctl_rmem_default;
2088 sk->sk_sndbuf = sysctl_wmem_default;
2089 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002090 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091
2092 sock_set_flag(sk, SOCK_ZAPPED);
2093
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002094 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002096 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 sock->sk = sk;
2098 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002099 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100
Eric Dumazetb6c67122010-04-08 23:03:29 +00002101 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002103 lockdep_set_class_and_name(&sk->sk_callback_lock,
2104 af_callback_keys + sk->sk_family,
2105 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106
2107 sk->sk_state_change = sock_def_wakeup;
2108 sk->sk_data_ready = sock_def_readable;
2109 sk->sk_write_space = sock_def_write_space;
2110 sk->sk_error_report = sock_def_error_report;
2111 sk->sk_destruct = sock_def_destruct;
2112
2113 sk->sk_sndmsg_page = NULL;
2114 sk->sk_sndmsg_off = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002115 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002117 sk->sk_peer_pid = NULL;
2118 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 sk->sk_write_pending = 0;
2120 sk->sk_rcvlowat = 1;
2121 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2122 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2123
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002124 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002126 /*
2127 * Before updating sk_refcnt, we must commit prior changes to memory
2128 * (Documentation/RCU/rculist_nulls.txt for details)
2129 */
2130 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002132 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133}
Eric Dumazet2a915252009-05-27 11:30:05 +00002134EXPORT_SYMBOL(sock_init_data);
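/*
 * Editor's sketch, not part of the original file: a protocol's create hook
 * usually pairs sk_alloc() with sock_init_data() before filling in its own
 * state. The sk_alloc() signature is assumed from its definition earlier in
 * this file; the family and the surrounding create path are hypothetical.
 */
static int example_create(struct net *net, struct socket *sock,
			  struct proto *prot)
{
	struct sock *sk = sk_alloc(net, PF_UNSPEC, GFP_KERNEL, prot);

	if (!sk)
		return -ENOMEM;
	sock_init_data(sock, sk);	/* queues, timer, default callbacks */
	/* ... hypothetical protocol specific initialisation ... */
	return 0;
}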
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002136void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137{
2138 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002139 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002140 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002142 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002143 spin_unlock(&sk->sk_lock.slock);
2144 /*
2145 * The sk_lock has mutex_lock() semantics here:
2146 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002147 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002148 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002150EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002152void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002154 /*
2155 * The sk_lock has mutex_unlock() semantics:
2156 */
2157 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2158
2159 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 if (sk->sk_backlog.tail)
2161 __release_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002162 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002163 if (waitqueue_active(&sk->sk_lock.wq))
2164 wake_up(&sk->sk_lock.wq);
2165 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166}
2167EXPORT_SYMBOL(release_sock);
2168
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002169/**
2170 * lock_sock_fast - fast version of lock_sock
2171 * @sk: socket
2172 *
2173 * This version should be used for very small sections, where the process won't block:
2174 * returns false if the fast path is taken,
2175 * sk_lock.slock locked, owned = 0, BH disabled;
2176 * returns true if the slow path is taken,
2177 * sk_lock.slock unlocked, owned = 1, BH enabled.
2178 */
2179bool lock_sock_fast(struct sock *sk)
2180{
2181 might_sleep();
2182 spin_lock_bh(&sk->sk_lock.slock);
2183
2184 if (!sk->sk_lock.owned)
2185 /*
2186 * Note : We must disable BH
2187 */
2188 return false;
2189
2190 __lock_sock(sk);
2191 sk->sk_lock.owned = 1;
2192 spin_unlock(&sk->sk_lock.slock);
2193 /*
2194 * The sk_lock has mutex_lock() semantics here:
2195 */
2196 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2197 local_bh_enable();
2198 return true;
2199}
2200EXPORT_SYMBOL(lock_sock_fast);
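/*
 * Editor's sketch, not part of the original file: the intended pairing is
 * with unlock_sock_fast() from net/sock.h (helper name assumed), which
 * either re-enables BHs or does a full release_sock() depending on which
 * path lock_sock_fast() took:
 */
static void example_touch_socket(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... hypothetical short, non-blocking update of sk state ... */

	unlock_sock_fast(sk, slow);
}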
2201
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002203{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002204 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002206 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002207 tv = ktime_to_timeval(sk->sk_stamp);
2208 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002210 if (tv.tv_sec == 0) {
2211 sk->sk_stamp = ktime_get_real();
2212 tv = ktime_to_timeval(sk->sk_stamp);
2213 }
2214 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002215}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216EXPORT_SYMBOL(sock_get_timestamp);
2217
Eric Dumazetae40eb12007-03-18 17:33:16 -07002218int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2219{
2220 struct timespec ts;
2221 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002222 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002223 ts = ktime_to_timespec(sk->sk_stamp);
2224 if (ts.tv_sec == -1)
2225 return -ENOENT;
2226 if (ts.tv_sec == 0) {
2227 sk->sk_stamp = ktime_get_real();
2228 ts = ktime_to_timespec(sk->sk_stamp);
2229 }
2230 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2231}
2232EXPORT_SYMBOL(sock_get_timestampns);
2233
Patrick Ohly20d49472009-02-12 05:03:38 +00002234void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002235{
Patrick Ohly20d49472009-02-12 05:03:38 +00002236 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002237 unsigned long previous_flags = sk->sk_flags;
2238
Patrick Ohly20d49472009-02-12 05:03:38 +00002239 sock_set_flag(sk, flag);
2240 /*
2241 * we just set one of the two flags which require net
2242 * time stamping, but time stamping might have been on
2243 * already because of the other one
2244 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002245 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002246 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 }
2248}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249
2250/*
2251 * Get a socket option on a socket.
2252 *
2253 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2254 * asynchronous errors should be reported by getsockopt. We assume
2255 * this means if you specify SO_ERROR (otherwise what's the point of it).
2256 */
2257int sock_common_getsockopt(struct socket *sock, int level, int optname,
2258 char __user *optval, int __user *optlen)
2259{
2260 struct sock *sk = sock->sk;
2261
2262 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2263}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264EXPORT_SYMBOL(sock_common_getsockopt);
2265
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002266#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002267int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2268 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002269{
2270 struct sock *sk = sock->sk;
2271
Johannes Berg1e51f952007-03-06 13:44:06 -08002272 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002273 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2274 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002275 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2276}
2277EXPORT_SYMBOL(compat_sock_common_getsockopt);
2278#endif
2279
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2281 struct msghdr *msg, size_t size, int flags)
2282{
2283 struct sock *sk = sock->sk;
2284 int addr_len = 0;
2285 int err;
2286
2287 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2288 flags & ~MSG_DONTWAIT, &addr_len);
2289 if (err >= 0)
2290 msg->msg_namelen = addr_len;
2291 return err;
2292}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293EXPORT_SYMBOL(sock_common_recvmsg);
2294
2295/*
2296 * Set socket options on an inet socket.
2297 */
2298int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002299 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300{
2301 struct sock *sk = sock->sk;
2302
2303 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2304}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305EXPORT_SYMBOL(sock_common_setsockopt);
2306
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002307#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002308int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002309 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002310{
2311 struct sock *sk = sock->sk;
2312
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002313 if (sk->sk_prot->compat_setsockopt != NULL)
2314 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2315 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002316 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2317}
2318EXPORT_SYMBOL(compat_sock_common_setsockopt);
2319#endif
2320
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321void sk_common_release(struct sock *sk)
2322{
2323 if (sk->sk_prot->destroy)
2324 sk->sk_prot->destroy(sk);
2325
2326 /*
2327 * Observation: when sock_common_release is called, processes have
2328 * no access to the socket, but the network stack still has.
2329 * Step one, detach it from networking:
2330 *
2331 * A. Remove from hash tables.
2332 */
2333
2334 sk->sk_prot->unhash(sk);
2335
2336 /*
2337 * At this point the socket cannot receive new packets, but it is possible
2338 * that some packets are in flight because some CPU is running the receiver and
2339 * did the hash table lookup before we unhashed the socket. They will reach the
2340 * receive queue and will be purged by the socket destructor.
2341 *
2342 * Also, we still have packets pending on the receive queue and probably
2343 * our own packets waiting in device queues. sock_destroy will drain the
2344 * receive queue, but transmitted packets will delay socket destruction
2345 * until the last reference is released.
2346 */
2347
2348 sock_orphan(sk);
2349
2350 xfrm_sk_free_policy(sk);
2351
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002352 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353 sock_put(sk);
2354}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355EXPORT_SYMBOL(sk_common_release);
2356
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002357#ifdef CONFIG_PROC_FS
2358#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002359struct prot_inuse {
2360 int val[PROTO_INUSE_NR];
2361};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002362
2363static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002364
2365#ifdef CONFIG_NET_NS
2366void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2367{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002368 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002369}
2370EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2371
2372int sock_prot_inuse_get(struct net *net, struct proto *prot)
2373{
2374 int cpu, idx = prot->inuse_idx;
2375 int res = 0;
2376
2377 for_each_possible_cpu(cpu)
2378 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2379
2380 return res >= 0 ? res : 0;
2381}
2382EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2383
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002384static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002385{
2386 net->core.inuse = alloc_percpu(struct prot_inuse);
2387 return net->core.inuse ? 0 : -ENOMEM;
2388}
2389
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002390static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002391{
2392 free_percpu(net->core.inuse);
2393}
2394
2395static struct pernet_operations net_inuse_ops = {
2396 .init = sock_inuse_init_net,
2397 .exit = sock_inuse_exit_net,
2398};
2399
2400static __init int net_inuse_init(void)
2401{
2402 if (register_pernet_subsys(&net_inuse_ops))
2403 panic("Cannot initialize net inuse counters");
2404
2405 return 0;
2406}
2407
2408core_initcall(net_inuse_init);
2409#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002410static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2411
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002412void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002413{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002414 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002415}
2416EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2417
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002418int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002419{
2420 int cpu, idx = prot->inuse_idx;
2421 int res = 0;
2422
2423 for_each_possible_cpu(cpu)
2424 res += per_cpu(prot_inuse, cpu).val[idx];
2425
2426 return res >= 0 ? res : 0;
2427}
2428EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002429#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002430
2431static void assign_proto_idx(struct proto *prot)
2432{
2433 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2434
2435 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002436 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002437 return;
2438 }
2439
2440 set_bit(prot->inuse_idx, proto_inuse_idx);
2441}
2442
2443static void release_proto_idx(struct proto *prot)
2444{
2445 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2446 clear_bit(prot->inuse_idx, proto_inuse_idx);
2447}
2448#else
2449static inline void assign_proto_idx(struct proto *prot)
2450{
2451}
2452
2453static inline void release_proto_idx(struct proto *prot)
2454{
2455}
2456#endif
2457
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458int proto_register(struct proto *prot, int alloc_slab)
2459{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 if (alloc_slab) {
2461 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002462 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2463 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464
2465 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002466 pr_crit("%s: Can't create sock SLAB cache!\n",
2467 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002468 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002470
2471 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002472 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002473 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002474 goto out_free_sock_slab;
2475
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002476 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002477 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002478 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002479
2480 if (prot->rsk_prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002481 pr_crit("%s: Can't create request sock SLAB cache!\n",
2482 prot->name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002483 goto out_free_request_sock_slab_name;
2484 }
2485 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002486
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002487 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002488 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002489
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002490 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002491 goto out_free_request_sock_slab;
2492
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002493 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002494 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002495 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002496 0,
2497 SLAB_HWCACHE_ALIGN |
2498 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002499 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002500 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002501 goto out_free_timewait_sock_slab_name;
2502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 }
2504
Glauber Costa36b77a52011-12-16 00:51:59 +00002505 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002507 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002508 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002509 return 0;
2510
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002511out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002512 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002513out_free_request_sock_slab:
2514 if (prot->rsk_prot && prot->rsk_prot->slab) {
2515 kmem_cache_destroy(prot->rsk_prot->slab);
2516 prot->rsk_prot->slab = NULL;
2517 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002518out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002519 if (prot->rsk_prot)
2520 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002521out_free_sock_slab:
2522 kmem_cache_destroy(prot->slab);
2523 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002524out:
2525 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527EXPORT_SYMBOL(proto_register);
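/*
 * Editor's sketch, not part of the original file: the minimal shape of a
 * protocol registration. The proto structure and the module init/exit hooks
 * are hypothetical; alloc_slab = 1 requests a dedicated kmem cache of
 * obj_size bytes for the protocol's sockets.
 */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_proto_init(void)
{
	return proto_register(&example_proto, 1);
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_proto);
}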

void proto_unregister(struct proto *prot)
{
        mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
        mutex_unlock(&proto_list_mutex);

        if (prot->slab != NULL) {
                kmem_cache_destroy(prot->slab);
                prot->slab = NULL;
        }

        if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                kfree(prot->rsk_prot->slab_name);
                prot->rsk_prot->slab = NULL;
        }

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);
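
/*
 * Usage note (illustrative sketch, not part of this file): a protocol
 * implementation typically registers its struct proto once at module init
 * and unregisters it on exit.  "my_prot", my_proto_init() and
 * my_proto_exit() are hypothetical names used only for illustration;
 * TCP, for example, registers its real tcp_prot with alloc_slab enabled.
 *
 *      static int __init my_proto_init(void)
 *      {
 *              return proto_register(&my_prot, 1);  1 == alloc_slab:
 *                      also create the per-socket kmem_cache
 *      }
 *
 *      static void __exit my_proto_exit(void)
 *      {
 *              proto_unregister(&my_prot);
 *      }
 */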

#ifdef CONFIG_PROC_FS
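/*
 * The remainder of this block implements /proc/net/protocols: a seq_file
 * that walks proto_list under proto_list_mutex and prints one header row
 * followed by one row per registered protocol.
 */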
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_mutex)
{
        mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_mutex)
{
        mutex_unlock(&proto_list_mutex);
}

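/*
 * Small formatting helpers for the per-protocol row: a 'y'/'n' flag for
 * each optional method, -1 when a protocol does not account its allocated
 * memory, and "NI" (not implemented) when it does not track memory
 * pressure.
 */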
static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
        return proto->memory_pressure != NULL ?
               proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

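/*
 * Emit one row describing @proto.  The column order must stay in sync with
 * the header line printed by proto_seq_show() below
 * ("cl co di ac io in de sh ss gs se re sp bi br ha uh gp em").
 */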
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   sock_prot_memory_allocated(proto),
                   sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}
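
/*
 * From userspace this surfaces as /proc/net/protocols: reading the file
 * (e.g. with cat) prints the header row above followed by one line per
 * protocol, with counters taken from the reader's network namespace.
 */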

static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
        .show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = proto_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};
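
/*
 * Per-namespace wiring: each struct net gets its own "protocols" entry in
 * its /proc/net directory, created when the namespace is set up and removed
 * when it is torn down.
 */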

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */