/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

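/*
 * Worked example (illustrative only; the exact numbers are an assumption,
 * not something this file computes): SKB_TRUESIZE(256) accounts for a
 * 256 byte payload plus struct sk_buff and skb_shared_info overhead, which
 * on a common 64-bit configuration comes to roughly 800-850 bytes per
 * packet.  With _SK_MEM_PACKETS == 256 that puts SK_WMEM_MAX/SK_RMEM_MAX
 * in the neighbourhood of 256 * ~832 = ~213000 bytes, i.e. the familiar
 * ~208 KiB default visible in /proc/sys/net/core/{r,w}mem_default.
 */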
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);
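/*
 * Illustrative sketch (an assumption, not code from this file): a kernel
 * user of emergency reserves, e.g. a swap-over-network transport, would
 * typically flag its transport socket right after the connection is set up
 * so that replies needed to make reclaim progress can still be received
 * under memory pressure:
 *
 *	err = kernel_connect(sock, (struct sockaddr *)&addr, sizeof(addr), 0);
 *	if (!err)
 *		sk_set_memalloc(sock->sk);
 */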

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
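/*
 * Illustrative arithmetic for the conversion above (not used by the code):
 * the microsecond part is rounded up to whole jiffies so that a small but
 * non-zero timeout never collapses to zero.  With HZ == 100 (one jiffy is
 * 10000 us), a request of tv = {0, 2500} yields
 * 0 * HZ + (2500 + 9999) / 10000 = 1 jiffy.
 */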

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we dont leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
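/*
 * Illustrative user space counterpart (an assumption, not part of this
 * file): a process with CAP_NET_RAW binds a socket to one interface by
 * name, after which the socket only sends and receives via that device:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 */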

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		 * not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_LL_RX_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
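/*
 * Illustrative effect of the SO_SNDBUF/SO_RCVBUF handling above (user space
 * sketch, an assumption rather than code from this file): the value read
 * back is twice the value requested, clamped by wmem_max/rmem_max and
 * floored at SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF:
 *
 *	int req = 65536, got;
 *	socklen_t len = sizeof(got);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
 *	// got is 131072 here, assuming rmem_max is at least 65536
 */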
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924
925
Eric W. Biederman3f551f92010-06-13 03:28:59 +0000926void cred_to_ucred(struct pid *pid, const struct cred *cred,
927 struct ucred *ucred)
928{
929 ucred->pid = pid_vnr(pid);
930 ucred->uid = ucred->gid = -1;
931 if (cred) {
932 struct user_namespace *current_ns = current_user_ns();
933
Eric W. Biedermanb2e4f542012-05-23 16:39:45 -0600934 ucred->uid = from_kuid_munged(current_ns, cred->euid);
935 ucred->gid = from_kgid_munged(current_ns, cred->egid);
Eric W. Biederman3f551f92010-06-13 03:28:59 +0000936 }
937}
David S. Miller39247732010-06-16 16:18:25 -0700938EXPORT_SYMBOL_GPL(cred_to_ucred);
Eric W. Biederman3f551f92010-06-13 03:28:59 +0000939
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940int sock_getsockopt(struct socket *sock, int level, int optname,
941 char __user *optval, int __user *optlen)
942{
943 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900944
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700945 union {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900946 int val;
947 struct linger ling;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 struct timeval tm;
949 } v;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900950
H Hartley Sweeten4d0392b2010-01-15 01:08:58 -0800951 int lv = sizeof(int);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 int len;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900953
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700954 if (get_user(len, optlen))
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900955 return -EFAULT;
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700956 if (len < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957 return -EINVAL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900958
Eugene Teo50fee1d2009-02-23 15:38:41 -0800959 memset(&v, 0, sizeof(v));
Clément Lecignedf0bca02009-02-12 16:59:09 -0800960
Eric Dumazet2a915252009-05-27 11:30:05 +0000961 switch (optname) {
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700962 case SO_DEBUG:
963 v.val = sock_flag(sk, SOCK_DBG);
964 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900965
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700966 case SO_DONTROUTE:
967 v.val = sock_flag(sk, SOCK_LOCALROUTE);
968 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900969
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700970 case SO_BROADCAST:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +0000971 v.val = sock_flag(sk, SOCK_BROADCAST);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700972 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700974 case SO_SNDBUF:
975 v.val = sk->sk_sndbuf;
976 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900977
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700978 case SO_RCVBUF:
979 v.val = sk->sk_rcvbuf;
980 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700982 case SO_REUSEADDR:
983 v.val = sk->sk_reuse;
984 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985
Tom Herbert055dc212013-01-22 09:49:50 +0000986 case SO_REUSEPORT:
987 v.val = sk->sk_reuseport;
988 break;
989
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700990 case SO_KEEPALIVE:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +0000991 v.val = sock_flag(sk, SOCK_KEEPOPEN);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700992 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700994 case SO_TYPE:
995 v.val = sk->sk_type;
996 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997
Jan Engelhardt49c794e2009-08-04 07:28:28 +0000998 case SO_PROTOCOL:
999 v.val = sk->sk_protocol;
1000 break;
1001
Jan Engelhardt0d6038e2009-08-04 07:28:29 +00001002 case SO_DOMAIN:
1003 v.val = sk->sk_family;
1004 break;
1005
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001006 case SO_ERROR:
1007 v.val = -sock_error(sk);
Eric Dumazet2a915252009-05-27 11:30:05 +00001008 if (v.val == 0)
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001009 v.val = xchg(&sk->sk_err_soft, 0);
1010 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001012 case SO_OOBINLINE:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001013 v.val = sock_flag(sk, SOCK_URGINLINE);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001014 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001015
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001016 case SO_NO_CHECK:
1017 v.val = sk->sk_no_check;
1018 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001020 case SO_PRIORITY:
1021 v.val = sk->sk_priority;
1022 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001023
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001024 case SO_LINGER:
1025 lv = sizeof(v.ling);
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001026 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001027 v.ling.l_linger = sk->sk_lingertime / HZ;
1028 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001029
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001030 case SO_BSDCOMPAT:
1031 sock_warn_obsolete_bsdism("getsockopt");
1032 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001034 case SO_TIMESTAMP:
Eric Dumazet92f37fd2007-03-25 22:14:49 -07001035 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1036 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1037 break;
1038
1039 case SO_TIMESTAMPNS:
1040 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001041 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042
Patrick Ohly20d49472009-02-12 05:03:38 +00001043 case SO_TIMESTAMPING:
1044 v.val = 0;
1045 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
1046 v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
1047 if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
1048 v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
1049 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
1050 v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
1051 if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1052 v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
1053 if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
1054 v.val |= SOF_TIMESTAMPING_SOFTWARE;
1055 if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
1056 v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
1057 if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
1058 v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
1059 break;
1060
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001061 case SO_RCVTIMEO:
Eric Dumazet2a915252009-05-27 11:30:05 +00001062 lv = sizeof(struct timeval);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001063 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1064 v.tm.tv_sec = 0;
1065 v.tm.tv_usec = 0;
1066 } else {
1067 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1068 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001070 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001072 case SO_SNDTIMEO:
Eric Dumazet2a915252009-05-27 11:30:05 +00001073 lv = sizeof(struct timeval);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001074 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1075 v.tm.tv_sec = 0;
1076 v.tm.tv_usec = 0;
1077 } else {
1078 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1079 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1080 }
1081 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001083 case SO_RCVLOWAT:
1084 v.val = sk->sk_rcvlowat;
1085 break;
Catherine Zhang877ce7c2006-06-29 12:27:47 -07001086
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001087 case SO_SNDLOWAT:
Eric Dumazet2a915252009-05-27 11:30:05 +00001088 v.val = 1;
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001089 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001091 case SO_PASSCRED:
Eric Dumazet82981932012-04-26 20:07:59 +00001092 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001093 break;
1094
1095 case SO_PEERCRED:
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001096 {
1097 struct ucred peercred;
1098 if (len > sizeof(peercred))
1099 len = sizeof(peercred);
1100 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1101 if (copy_to_user(optval, &peercred, len))
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001102 return -EFAULT;
1103 goto lenout;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001104 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001105
1106 case SO_PEERNAME:
1107 {
1108 char address[128];
1109
1110 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1111 return -ENOTCONN;
1112 if (lv < len)
1113 return -EINVAL;
1114 if (copy_to_user(optval, address, len))
1115 return -EFAULT;
1116 goto lenout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001118
1119 /* Dubious BSD thing... Probably nobody even uses it, but
1120 * the UNIX standard wants it for whatever reason... -DaveM
1121 */
1122 case SO_ACCEPTCONN:
1123 v.val = sk->sk_state == TCP_LISTEN;
1124 break;
1125
1126 case SO_PASSSEC:
Eric Dumazet82981932012-04-26 20:07:59 +00001127 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001128 break;
1129
1130 case SO_PEERSEC:
1131 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1132
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -08001133 case SO_MARK:
1134 v.val = sk->sk_mark;
1135 break;
1136
Neil Horman3b885782009-10-12 13:26:31 -07001137 case SO_RXQ_OVFL:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001138 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
Neil Horman3b885782009-10-12 13:26:31 -07001139 break;
1140
Johannes Berg6e3e9392011-11-09 10:15:42 +01001141 case SO_WIFI_STATUS:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001142 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
Johannes Berg6e3e9392011-11-09 10:15:42 +01001143 break;
1144
Pavel Emelyanovef64a542012-02-21 07:31:34 +00001145 case SO_PEEK_OFF:
1146 if (!sock->ops->set_peek_off)
1147 return -EOPNOTSUPP;
1148
1149 v.val = sk->sk_peek_off;
1150 break;
David S. Millerbc2f7992012-02-24 14:48:34 -05001151 case SO_NOFCS:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001152 v.val = sock_flag(sk, SOCK_NOFCS);
David S. Millerbc2f7992012-02-24 14:48:34 -05001153 break;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001154
Pavel Emelyanovf7b86bf2012-10-18 23:55:56 +00001155 case SO_BINDTODEVICE:
Brian Haleyc91f6df2012-11-26 05:21:08 +00001156 return sock_getbindtodevice(sk, optval, optlen, len);
1157
Pavel Emelyanova8fc9272012-11-01 02:01:48 +00001158 case SO_GET_FILTER:
1159 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1160 if (len < 0)
1161 return len;
1162
1163 goto lenout;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001164
Vincent Bernatd59577b2013-01-16 22:55:49 +01001165 case SO_LOCK_FILTER:
1166 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1167 break;
1168
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00001169 case SO_SELECT_ERR_QUEUE:
1170 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1171 break;
1172
Eliezer Tamirdafcc432013-06-14 16:33:57 +03001173#ifdef CONFIG_NET_LL_RX_POLL
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03001174 case SO_BUSY_POLL:
Eliezer Tamirdafcc432013-06-14 16:33:57 +03001175 v.val = sk->sk_ll_usec;
1176 break;
1177#endif
1178
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001179 default:
1180 return -ENOPROTOOPT;
1181 }
1182
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 if (len > lv)
1184 len = lv;
1185 if (copy_to_user(optval, &v, len))
1186 return -EFAULT;
1187lenout:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001188 if (put_user(len, optlen))
1189 return -EFAULT;
1190 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191}
1192
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001193/*
1194 * Initialize an sk_lock.
1195 *
1196 * (We also register the sk_lock with the lock validator.)
1197 */
Dave Jonesb6f99a22007-03-22 12:27:49 -07001198static inline void sock_lock_init(struct sock *sk)
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001199{
Peter Zijlstraed075362006-12-06 20:35:24 -08001200 sock_lock_init_class_and_name(sk,
1201 af_family_slock_key_strings[sk->sk_family],
1202 af_family_slock_keys + sk->sk_family,
1203 af_family_key_strings[sk->sk_family],
1204 af_family_keys + sk->sk_family);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001205}
1206
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001207/*
1208 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1209 * even temporarly, because of RCU lookups. sk_node should also be left as is.
Eric Dumazet68835ab2010-11-30 19:04:07 +00001210 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001211 */
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001212static void sock_copy(struct sock *nsk, const struct sock *osk)
1213{
1214#ifdef CONFIG_SECURITY_NETWORK
1215 void *sptr = nsk->sk_security;
1216#endif
Eric Dumazet68835ab2010-11-30 19:04:07 +00001217 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1218
1219 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1220 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1221
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001222#ifdef CONFIG_SECURITY_NETWORK
1223 nsk->sk_security = sptr;
1224 security_sk_clone(osk, nsk);
1225#endif
1226}
1227
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001228void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1229{
1230 unsigned long nulls1, nulls2;
1231
1232 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1233 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1234 if (nulls1 > nulls2)
1235 swap(nulls1, nulls2);
1236
1237 if (nulls1 != 0)
1238 memset((char *)sk, 0, nulls1);
1239 memset((char *)sk + nulls1 + sizeof(void *), 0,
1240 nulls2 - nulls1 - sizeof(void *));
1241 memset((char *)sk + nulls2 + sizeof(void *), 0,
1242 size - nulls2 - sizeof(void *));
1243}
1244EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1245
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001246static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1247 int family)
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001248{
1249 struct sock *sk;
1250 struct kmem_cache *slab;
1251
1252 slab = prot->slab;
Eric Dumazete912b112009-07-08 19:36:05 +00001253 if (slab != NULL) {
1254 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1255 if (!sk)
1256 return sk;
1257 if (priority & __GFP_ZERO) {
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001258 if (prot->clear_sk)
1259 prot->clear_sk(sk, prot->obj_size);
1260 else
1261 sk_prot_clear_nulls(sk, prot->obj_size);
Eric Dumazete912b112009-07-08 19:36:05 +00001262 }
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001263 } else
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001264 sk = kmalloc(prot->obj_size, priority);
1265
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001266 if (sk != NULL) {
Vegard Nossuma98b65a2009-02-26 14:46:57 +01001267 kmemcheck_annotate_bitfield(sk, flags);
1268
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001269 if (security_sk_alloc(sk, family, priority))
1270 goto out_free;
1271
1272 if (!try_module_get(prot->owner))
1273 goto out_free_sec;
Krishna Kumare022f0b2009-10-19 23:46:20 +00001274 sk_tx_queue_clear(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001275 }
1276
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001277 return sk;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001278
1279out_free_sec:
1280 security_sk_free(sk);
1281out_free:
1282 if (slab != NULL)
1283 kmem_cache_free(slab, sk);
1284 else
1285 kfree(sk);
1286 return NULL;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001287}
1288
1289static void sk_prot_free(struct proto *prot, struct sock *sk)
1290{
1291 struct kmem_cache *slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001292 struct module *owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001293
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001294 owner = prot->owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001295 slab = prot->slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001296
1297 security_sk_free(sk);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001298 if (slab != NULL)
1299 kmem_cache_free(slab, sk);
1300 else
1301 kfree(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001302 module_put(owner);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001303}
1304
Daniel Wagner8fb974c2012-09-12 16:12:02 +02001305#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
Zefan Li211d2f972013-04-08 20:03:35 +00001306void sock_update_classid(struct sock *sk)
Herbert Xuf8451722010-05-24 00:12:34 -07001307{
Paul E. McKenney11441822010-10-06 17:15:35 -07001308 u32 classid;
Herbert Xuf8451722010-05-24 00:12:34 -07001309
Zefan Li211d2f972013-04-08 20:03:35 +00001310 classid = task_cls_classid(current);
Neil Horman3afa6d02012-08-20 07:59:10 +00001311 if (classid != sk->sk_classid)
Herbert Xuf8451722010-05-24 00:12:34 -07001312 sk->sk_classid = classid;
1313}
Herbert Xu82862742010-05-24 00:14:10 -07001314EXPORT_SYMBOL(sock_update_classid);
Daniel Wagner8fb974c2012-09-12 16:12:02 +02001315#endif
Neil Horman5bc14212011-11-22 05:10:51 +00001316
Daniel Wagner51e4e7f2012-09-12 16:12:03 +02001317#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
Zefan Li6ffd4642013-04-08 20:03:47 +00001318void sock_update_netprioidx(struct sock *sk)
Neil Horman5bc14212011-11-22 05:10:51 +00001319{
Neil Horman5bc14212011-11-22 05:10:51 +00001320 if (in_interrupt())
1321 return;
Neil Horman2b73bc62012-02-10 05:43:38 +00001322
Zefan Li6ffd4642013-04-08 20:03:47 +00001323 sk->sk_cgrp_prioidx = task_netprioidx(current);
Neil Horman5bc14212011-11-22 05:10:51 +00001324}
1325EXPORT_SYMBOL_GPL(sock_update_netprioidx);
Herbert Xuf8451722010-05-24 00:12:34 -07001326#endif
1327
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328/**
1329 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001330 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001331 * @family: protocol family
1332 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1333 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001335struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001336 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001338 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001340 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001342 sk->sk_family = family;
1343 /*
1344 * See comment in struct sock definition to understand
1345 * why we need sk_prot_creator -acme
1346 */
1347 sk->sk_prot = sk->sk_prot_creator = prot;
1348 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001349 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001350 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001351
Zefan Li211d2f972013-04-08 20:03:35 +00001352 sock_update_classid(sk);
Zefan Li6ffd4642013-04-08 20:03:47 +00001353 sock_update_netprioidx(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 }
Frank Filza79af592005-09-27 15:23:38 -07001355
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001356 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357}
Eric Dumazet2a915252009-05-27 11:30:05 +00001358EXPORT_SYMBOL(sk_alloc);
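/*
 * Illustrative sketch, not part of this file: a protocol's ->create()
 * handler typically pairs sk_alloc() with sock_init_data() and then fills
 * in its own state.  "my_proto" and "my_proto_ops" are hypothetical.
 *
 *	static int my_create(struct net *net, struct socket *sock)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *
 *		sock_init_data(sock, sk);
 *		sock->ops = &my_proto_ops;
 *		return 0;
 *	}
 */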
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
Eric Dumazet2b85a342009-06-11 02:55:43 -07001360static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361{
1362 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
1364 if (sk->sk_destruct)
1365 sk->sk_destruct(sk);
1366
Paul E. McKenneya898def2010-02-22 17:04:49 -08001367 filter = rcu_dereference_check(sk->sk_filter,
1368 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001370 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001371 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 }
1373
Eric Dumazet08e29af2011-11-28 12:04:18 +00001374 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375
1376 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001377 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1378 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001380 if (sk->sk_peer_cred)
1381 put_cred(sk->sk_peer_cred);
1382 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001383 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001384 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001386
1387void sk_free(struct sock *sk)
1388{
1389 /*
 * We subtract one from sk_wmem_alloc, which lets us know whether
 * some packets are still in some tx queue.
 * If the result is not zero, sock_wfree() will call __sk_free(sk) later.
1393 */
1394 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1395 __sk_free(sk);
1396}
Eric Dumazet2a915252009-05-27 11:30:05 +00001397EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Denis V. Lunevedf02082008-02-29 11:18:32 -08001399/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to the stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
1405 */
1406void sk_release_kernel(struct sock *sk)
1407{
1408 if (sk == NULL || sk->sk_socket == NULL)
1409 return;
1410
1411 sock_hold(sk);
1412 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001413 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001414 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001415 sock_put(sk);
1416}
David S. Miller45af1752008-02-29 11:33:19 -08001417EXPORT_SYMBOL(sk_release_kernel);
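/*
 * Illustrative sketch, not part of this file, of the pattern this helper
 * supports: per-namespace kernel control sockets are usually created in
 * init_net, re-homed with sk_change_net() and finally destroyed with
 * sk_release_kernel(), so the last put runs against init_net even while
 * the owning namespace is being dismantled.
 *
 *	err = sock_create_kern(family, type, protocol, &sock);
 *	if (!err) {
 *		sock->sk->sk_allocation = GFP_ATOMIC;
 *		sk_change_net(sock->sk, net);
 *	}
 *	...
 *	sk_release_kernel(sock->sk);
 */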
Denis V. Lunevedf02082008-02-29 11:18:32 -08001418
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001419static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1420{
1421 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1422 sock_update_memcg(newsk);
1423}
1424
Eric Dumazete56c57d2011-11-08 17:07:07 -05001425/**
1426 * sk_clone_lock - clone a socket, and lock its clone
1427 * @sk: the socket to clone
1428 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1429 *
 * Callers must unlock the new socket even in their error paths (bh_unlock_sock(newsk))
1431 */
1432struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001433{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001434 struct sock *newsk;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001435
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001436 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001437 if (newsk != NULL) {
1438 struct sk_filter *filter;
1439
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001440 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001441
1442 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001443 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001444 sk_node_init(&newsk->sk_node);
1445 sock_lock_init(newsk);
1446 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001447 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001448 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001449
1450 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001451 /*
1452 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1453 */
1454 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001455 atomic_set(&newsk->sk_omem_alloc, 0);
1456 skb_queue_head_init(&newsk->sk_receive_queue);
1457 skb_queue_head_init(&newsk->sk_write_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001458#ifdef CONFIG_NET_DMA
1459 skb_queue_head_init(&newsk->sk_async_wait_queue);
1460#endif
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001461
Eric Dumazetb6c67122010-04-08 23:03:29 +00001462 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001463 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001464 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1465 af_callback_keys + newsk->sk_family,
1466 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001467
1468 newsk->sk_dst_cache = NULL;
1469 newsk->sk_wmem_queued = 0;
1470 newsk->sk_forward_alloc = 0;
1471 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001472 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1473
1474 sock_reset_flag(newsk, SOCK_DONE);
1475 skb_queue_head_init(&newsk->sk_error_queue);
1476
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001477 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001478 if (filter != NULL)
1479 sk_filter_charge(newsk, filter);
1480
1481 if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
1484 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001485 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001486 sk_free(newsk);
1487 newsk = NULL;
1488 goto out;
1489 }
1490
1491 newsk->sk_err = 0;
1492 newsk->sk_priority = 0;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001493 /*
1494 * Before updating sk_refcnt, we must commit prior changes to memory
1495 * (Documentation/RCU/rculist_nulls.txt for details)
1496 */
1497 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001498 atomic_set(&newsk->sk_refcnt, 2);
1499
1500 /*
1501 * Increment the counter in the same struct proto as the master
1502 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1503 * is the same as sk->sk_prot->socks, as this field was copied
1504 * with memcpy).
1505 *
1506 * This _changes_ the previous behaviour, where
1507 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1509 * to be taken into account in all callers. -acme
1510 */
1511 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001512 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001513 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001514
Glauber Costaf3f511e2012-01-05 20:16:39 +00001515 sk_update_clone(sk, newsk);
1516
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001517 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001518 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001519
Eric Dumazet08e29af2011-11-28 12:04:18 +00001520 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001521 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001522 }
1523out:
1524 return newsk;
1525}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001526EXPORT_SYMBOL_GPL(sk_clone_lock);
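/*
 * Illustrative sketch, not part of this file: whatever a caller does with
 * the clone, it owns the bh lock and must drop it on every path, including
 * its own error paths.
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		... protocol-specific initialisation of newsk ...
 *		bh_unlock_sock(newsk);		required on every path
 *	}
 */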
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001527
Andi Kleen99580892007-04-20 17:12:43 -07001528void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1529{
1530 __sk_dst_set(sk, dst);
1531 sk->sk_route_caps = dst->dev->features;
1532 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001533 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001534 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001535 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001536 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001537 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001538 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001539 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001540 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001541 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001542 }
Andi Kleen99580892007-04-20 17:12:43 -07001543 }
1544}
1545EXPORT_SYMBOL_GPL(sk_setup_caps);
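/*
 * Illustrative sketch, not part of this file: connect()-style paths call
 * this once the route has been resolved, so the socket caches both the
 * destination and the output device's offload capabilities.
 *
 *	struct rtable *rt = ...;		route already resolved
 *
 *	sk_setup_caps(sk, &rt->dst);
 *	... sk->sk_route_caps and sk->sk_gso_max_size now reflect rt->dst.dev ...
 */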
1546
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547/*
1548 * Simple resource managers for sockets.
1549 */
1550
1551
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001552/*
1553 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 */
1555void sock_wfree(struct sk_buff *skb)
1556{
1557 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001558 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Eric Dumazetd99927f2009-09-24 10:49:24 +00001560 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1561 /*
1562 * Keep a reference on sk_wmem_alloc, this will be released
1563 * after sk_write_space() call
1564 */
1565 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001567 len = 1;
1568 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001569 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001570 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1571 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001572 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001573 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001574 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575}
Eric Dumazet2a915252009-05-27 11:30:05 +00001576EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001578/*
1579 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 */
1581void sock_rfree(struct sk_buff *skb)
1582{
1583 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001584 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585
Eric Dumazetd361fd52010-07-10 22:45:17 +00001586 atomic_sub(len, &sk->sk_rmem_alloc);
1587 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588}
Eric Dumazet2a915252009-05-27 11:30:05 +00001589EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
David S. Miller41063e92012-06-19 21:22:05 -07001591void sock_edemux(struct sk_buff *skb)
1592{
Eric Dumazete8123472012-09-02 23:57:18 +00001593 struct sock *sk = skb->sk;
1594
Randy Dunlap1c463e52012-09-10 09:13:07 -07001595#ifdef CONFIG_INET
Eric Dumazete8123472012-09-02 23:57:18 +00001596 if (sk->sk_state == TCP_TIME_WAIT)
1597 inet_twsk_put(inet_twsk(sk));
1598 else
Randy Dunlap1c463e52012-09-10 09:13:07 -07001599#endif
Eric Dumazete8123472012-09-02 23:57:18 +00001600 sock_put(sk);
David S. Miller41063e92012-06-19 21:22:05 -07001601}
1602EXPORT_SYMBOL(sock_edemux);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603
Eric W. Biederman976d02012012-05-23 17:16:53 -06001604kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001606 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Eric Dumazetf064af12010-09-22 12:43:39 +00001608 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001609 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001610 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 return uid;
1612}
Eric Dumazet2a915252009-05-27 11:30:05 +00001613EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
1615unsigned long sock_i_ino(struct sock *sk)
1616{
1617 unsigned long ino;
1618
Eric Dumazetf064af12010-09-22 12:43:39 +00001619 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001621 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 return ino;
1623}
Eric Dumazet2a915252009-05-27 11:30:05 +00001624EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
1626/*
1627 * Allocate a skb from the socket's send buffer.
1628 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001629struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001630 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631{
1632 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001633 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 if (skb) {
1635 skb_set_owner_w(skb, sk);
1636 return skb;
1637 }
1638 }
1639 return NULL;
1640}
Eric Dumazet2a915252009-05-27 11:30:05 +00001641EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
1643/*
1644 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001645 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001646struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001647 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
1649 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1650 struct sk_buff *skb = alloc_skb(size, priority);
1651 if (skb) {
1652 skb_set_owner_r(skb, sk);
1653 return skb;
1654 }
1655 }
1656 return NULL;
1657}
1658
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001659/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001661 */
Al Virodd0fc662005-10-07 07:46:04 +01001662void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663{
Eric Dumazet95c96172012-04-15 05:58:06 +00001664 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1666 void *mem;
1667 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001668 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 */
1670 atomic_add(size, &sk->sk_omem_alloc);
1671 mem = kmalloc(size, priority);
1672 if (mem)
1673 return mem;
1674 atomic_sub(size, &sk->sk_omem_alloc);
1675 }
1676 return NULL;
1677}
Eric Dumazet2a915252009-05-27 11:30:05 +00001678EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679
1680/*
1681 * Free an option memory block.
1682 */
1683void sock_kfree_s(struct sock *sk, void *mem, int size)
1684{
1685 kfree(mem);
1686 atomic_sub(size, &sk->sk_omem_alloc);
1687}
Eric Dumazet2a915252009-05-27 11:30:05 +00001688EXPORT_SYMBOL(sock_kfree_s);
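/*
 * Illustrative sketch, not part of this file: option memory must be freed
 * with the same size it was charged with, typically from setsockopt()
 * context.
 *
 *	void *blob = sock_kmalloc(sk, len, GFP_KERNEL);
 *
 *	if (!blob)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, blob, len);	uncharges sk_omem_alloc by 'len'
 */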
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
1690/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
1692 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001693static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694{
1695 DEFINE_WAIT(wait);
1696
1697 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1698 for (;;) {
1699 if (!timeo)
1700 break;
1701 if (signal_pending(current))
1702 break;
1703 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001704 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1706 break;
1707 if (sk->sk_shutdown & SEND_SHUTDOWN)
1708 break;
1709 if (sk->sk_err)
1710 break;
1711 timeo = schedule_timeout(timeo);
1712 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001713 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 return timeo;
1715}
1716
1717
1718/*
1719 * Generic send/receive buffer handlers
1720 */
1721
Herbert Xu4cc7f682009-02-04 16:55:54 -08001722struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1723 unsigned long data_len, int noblock,
1724 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725{
1726 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001727 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 long timeo;
1729 int err;
Jason Wangcc9b17a2012-05-30 21:18:10 +00001730 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1731
1732 err = -EMSGSIZE;
1733 if (npages > MAX_SKB_FRAGS)
1734 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
1736 gfp_mask = sk->sk_allocation;
1737 if (gfp_mask & __GFP_WAIT)
1738 gfp_mask |= __GFP_REPEAT;
1739
1740 timeo = sock_sndtimeo(sk, noblock);
1741 while (1) {
1742 err = sock_error(sk);
1743 if (err != 0)
1744 goto failure;
1745
1746 err = -EPIPE;
1747 if (sk->sk_shutdown & SEND_SHUTDOWN)
1748 goto failure;
1749
1750 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001751 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 if (skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 int i;
1754
1755 /* No pages, we're done... */
1756 if (!data_len)
1757 break;
1758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 skb->truesize += data_len;
1760 skb_shinfo(skb)->nr_frags = npages;
1761 for (i = 0; i < npages; i++) {
1762 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763
1764 page = alloc_pages(sk->sk_allocation, 0);
1765 if (!page) {
1766 err = -ENOBUFS;
1767 skb_shinfo(skb)->nr_frags = i;
1768 kfree_skb(skb);
1769 goto failure;
1770 }
1771
Ian Campbellea2ab692011-08-22 23:44:58 +00001772 __skb_fill_page_desc(skb, i,
1773 page, 0,
1774 (data_len >= PAGE_SIZE ?
1775 PAGE_SIZE :
1776 data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 data_len -= PAGE_SIZE;
1778 }
1779
1780 /* Full success... */
1781 break;
1782 }
1783 err = -ENOBUFS;
1784 goto failure;
1785 }
1786 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1787 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1788 err = -EAGAIN;
1789 if (!timeo)
1790 goto failure;
1791 if (signal_pending(current))
1792 goto interrupted;
1793 timeo = sock_wait_for_wmem(sk, timeo);
1794 }
1795
1796 skb_set_owner_w(skb, sk);
1797 return skb;
1798
1799interrupted:
1800 err = sock_intr_errno(timeo);
1801failure:
1802 *errcode = err;
1803 return NULL;
1804}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001805EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001807struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 int noblock, int *errcode)
1809{
1810 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1811}
Eric Dumazet2a915252009-05-27 11:30:05 +00001812EXPORT_SYMBOL(sock_alloc_send_skb);
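/*
 * Illustrative sketch, not part of this file: a datagram sendmsg()
 * implementation typically blocks here (subject to the send timeout)
 * until the write buffer has room, then copies the user data in.
 * "reserve" stands for whatever headroom the protocol needs.
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;
 *	skb_reserve(skb, reserve);
 *	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 */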
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
Eric Dumazet5640f762012-09-23 23:04:42 +00001814/* On 32bit arches, an skb frag is limited to 2^15 */
1815#define SKB_FRAG_PAGE_ORDER get_order(32768)
1816
1817bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1818{
1819 int order;
1820
1821 if (pfrag->page) {
1822 if (atomic_read(&pfrag->page->_count) == 1) {
1823 pfrag->offset = 0;
1824 return true;
1825 }
1826 if (pfrag->offset < pfrag->size)
1827 return true;
1828 put_page(pfrag->page);
1829 }
1830
1831 /* We restrict high order allocations to users that can afford to wait */
1832 order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
1833
1834 do {
1835 gfp_t gfp = sk->sk_allocation;
1836
1837 if (order)
1838 gfp |= __GFP_COMP | __GFP_NOWARN;
1839 pfrag->page = alloc_pages(gfp, order);
1840 if (likely(pfrag->page)) {
1841 pfrag->offset = 0;
1842 pfrag->size = PAGE_SIZE << order;
1843 return true;
1844 }
1845 } while (--order >= 0);
1846
1847 sk_enter_memory_pressure(sk);
1848 sk_stream_moderate_sndbuf(sk);
1849 return false;
1850}
1851EXPORT_SYMBOL(sk_page_frag_refill);
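/*
 * Illustrative sketch, not part of this file: sendmsg()-style users fetch
 * the per-socket (or per-task) fragment with sk_page_frag(), refill it
 * when needed, then consume space at pfrag->offset.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *	... copy user data into pfrag->page at pfrag->offset ...
 *	pfrag->offset += copy;
 */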
1852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001854 __releases(&sk->sk_lock.slock)
1855 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856{
1857 DEFINE_WAIT(wait);
1858
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001859 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1861 TASK_UNINTERRUPTIBLE);
1862 spin_unlock_bh(&sk->sk_lock.slock);
1863 schedule();
1864 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001865 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 break;
1867 }
1868 finish_wait(&sk->sk_lock.wq, &wait);
1869}
1870
1871static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001872 __releases(&sk->sk_lock.slock)
1873 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874{
1875 struct sk_buff *skb = sk->sk_backlog.head;
1876
1877 do {
1878 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1879 bh_unlock_sock(sk);
1880
1881 do {
1882 struct sk_buff *next = skb->next;
1883
Eric Dumazete4cbb022012-04-30 16:07:09 +00001884 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001885 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001887 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
1889 /*
1890 * We are in process context here with softirqs
1891 * disabled, use cond_resched_softirq() to preempt.
1892 * This is safe to do because we've taken the backlog
1893 * queue private:
1894 */
1895 cond_resched_softirq();
1896
1897 skb = next;
1898 } while (skb != NULL);
1899
1900 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001901 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001902
1903 /*
	 * Doing the zeroing here guarantees we cannot loop forever
1905 * while a wild producer attempts to flood us.
1906 */
1907 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908}
1909
1910/**
1911 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001912 * @sk: sock to wait on
1913 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
1919 */
1920int sk_wait_data(struct sock *sk, long *timeo)
1921{
1922 int rc;
1923 DEFINE_WAIT(wait);
1924
Eric Dumazetaa395142010-04-20 13:03:51 +00001925 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1927 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1928 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001929 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 return rc;
1931}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932EXPORT_SYMBOL(sk_wait_data);
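/*
 * Illustrative sketch, not part of this file: recvmsg() implementations
 * typically loop on the receive queue under lock_sock(), sleeping in
 * sk_wait_data() while it stays empty.
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 */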
1933
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001934/**
1935 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1936 * @sk: socket
1937 * @size: memory size to allocate
1938 * @kind: allocation type
1939 *
1940 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1941 * rmem allocation. This function assumes that protocols which have
1942 * memory_pressure use sk_wmem_queued as write buffer accounting.
1943 */
1944int __sk_mem_schedule(struct sock *sk, int size, int kind)
1945{
1946 struct proto *prot = sk->sk_prot;
1947 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001948 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001949 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001950
1951 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001952
Glauber Costae1aab162011-12-11 21:47:03 +00001953 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001954
1955 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001956 if (parent_status == UNDER_LIMIT &&
1957 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001958 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001959 return 1;
1960 }
1961
Glauber Costae1aab162011-12-11 21:47:03 +00001962 /* Under pressure. (we or our parents) */
1963 if ((parent_status > SOFT_LIMIT) ||
1964 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001965 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001966
Glauber Costae1aab162011-12-11 21:47:03 +00001967 /* Over hard limit (we or our parents) */
1968 if ((parent_status == OVER_LIMIT) ||
1969 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001970 goto suppress_allocation;
1971
1972 /* guarantee minimum buffer size under pressure */
1973 if (kind == SK_MEM_RECV) {
1974 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1975 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001976
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001977 } else { /* SK_MEM_SEND */
1978 if (sk->sk_type == SOCK_STREAM) {
1979 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1980 return 1;
1981 } else if (atomic_read(&sk->sk_wmem_alloc) <
1982 prot->sysctl_wmem[0])
1983 return 1;
1984 }
1985
Glauber Costa180d8cd2011-12-11 21:47:02 +00001986 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08001987 int alloc;
1988
Glauber Costa180d8cd2011-12-11 21:47:02 +00001989 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08001990 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001991 alloc = sk_sockets_allocated_read_positive(sk);
1992 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001993 sk_mem_pages(sk->sk_wmem_queued +
1994 atomic_read(&sk->sk_rmem_alloc) +
1995 sk->sk_forward_alloc))
1996 return 1;
1997 }
1998
1999suppress_allocation:
2000
2001 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2002 sk_stream_moderate_sndbuf(sk);
2003
2004 /* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
2006 */
2007 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2008 return 1;
2009 }
2010
Satoru Moriya3847ce32011-06-17 12:00:03 +00002011 trace_sock_exceed_buf_limit(sk, prot, allocated);
2012
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002013 /* Alas. Undo changes. */
2014 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002015
Glauber Costa0e90b312012-01-20 04:57:16 +00002016 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002017
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002018 return 0;
2019}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002020EXPORT_SYMBOL(__sk_mem_schedule);
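/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 so SK_MEM_QUANTUM
 * is one 4 KiB page): charging size = 6000 bytes gives
 * amt = sk_mem_pages(6000) = 2, so sk_forward_alloc grows by 8192 and the
 * protocol's memory_allocated counter by 2 pages; the remaining
 * 8192 - 6000 = 2192 bytes stay in sk_forward_alloc for later charges.
 * Protocol code normally reaches this function through the
 * sk_wmem_schedule()/sk_rmem_schedule() wrappers rather than calling it
 * directly.
 */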
2021
2022/**
 * __sk_mem_reclaim - reclaim memory_allocated
2024 * @sk: socket
2025 */
2026void __sk_mem_reclaim(struct sock *sk)
2027{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002028 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00002029 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002030 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2031
Glauber Costa180d8cd2011-12-11 21:47:02 +00002032 if (sk_under_memory_pressure(sk) &&
2033 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2034 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002035}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002036EXPORT_SYMBOL(__sk_mem_reclaim);
2037
2038
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039/*
2040 * Set of default routines for initialising struct proto_ops when
2041 * the protocol does not support a particular function. In certain
2042 * cases where it makes no sense for a protocol to have a "do nothing"
2043 * function, some default processing is provided.
2044 */
2045
2046int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2047{
2048 return -EOPNOTSUPP;
2049}
Eric Dumazet2a915252009-05-27 11:30:05 +00002050EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002052int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 int len, int flags)
2054{
2055 return -EOPNOTSUPP;
2056}
Eric Dumazet2a915252009-05-27 11:30:05 +00002057EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058
2059int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2060{
2061 return -EOPNOTSUPP;
2062}
Eric Dumazet2a915252009-05-27 11:30:05 +00002063EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
2065int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2066{
2067 return -EOPNOTSUPP;
2068}
Eric Dumazet2a915252009-05-27 11:30:05 +00002069EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002071int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 int *len, int peer)
2073{
2074 return -EOPNOTSUPP;
2075}
Eric Dumazet2a915252009-05-27 11:30:05 +00002076EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
Eric Dumazet2a915252009-05-27 11:30:05 +00002078unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079{
2080 return 0;
2081}
Eric Dumazet2a915252009-05-27 11:30:05 +00002082EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
2084int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2085{
2086 return -EOPNOTSUPP;
2087}
Eric Dumazet2a915252009-05-27 11:30:05 +00002088EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089
2090int sock_no_listen(struct socket *sock, int backlog)
2091{
2092 return -EOPNOTSUPP;
2093}
Eric Dumazet2a915252009-05-27 11:30:05 +00002094EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
2096int sock_no_shutdown(struct socket *sock, int how)
2097{
2098 return -EOPNOTSUPP;
2099}
Eric Dumazet2a915252009-05-27 11:30:05 +00002100EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
2102int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002103 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104{
2105 return -EOPNOTSUPP;
2106}
Eric Dumazet2a915252009-05-27 11:30:05 +00002107EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109int sock_no_getsockopt(struct socket *sock, int level, int optname,
2110 char __user *optval, int __user *optlen)
2111{
2112 return -EOPNOTSUPP;
2113}
Eric Dumazet2a915252009-05-27 11:30:05 +00002114EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
2116int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2117 size_t len)
2118{
2119 return -EOPNOTSUPP;
2120}
Eric Dumazet2a915252009-05-27 11:30:05 +00002121EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
2123int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2124 size_t len, int flags)
2125{
2126 return -EOPNOTSUPP;
2127}
Eric Dumazet2a915252009-05-27 11:30:05 +00002128EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2131{
2132 /* Mirror missing mmap method error code */
2133 return -ENODEV;
2134}
Eric Dumazet2a915252009-05-27 11:30:05 +00002135EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
2137ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2138{
2139 ssize_t res;
2140 struct msghdr msg = {.msg_flags = flags};
2141 struct kvec iov;
2142 char *kaddr = kmap(page);
2143 iov.iov_base = kaddr + offset;
2144 iov.iov_len = size;
2145 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2146 kunmap(page);
2147 return res;
2148}
Eric Dumazet2a915252009-05-27 11:30:05 +00002149EXPORT_SYMBOL(sock_no_sendpage);
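/*
 * Illustrative sketch, not part of this file: a minimal datagram-only
 * protocol fills the operations it does not implement with the stubs
 * above.  All "my_*" names and PF_MYPROTO are hypothetical.
 *
 *	static const struct proto_ops my_proto_ops = {
 *		.family		= PF_MYPROTO,
 *		.owner		= THIS_MODULE,
 *		.release	= my_release,
 *		.bind		= my_bind,
 *		.sendmsg	= my_sendmsg,
 *		.recvmsg	= my_recvmsg,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.shutdown	= sock_no_shutdown,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */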
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150
2151/*
2152 * Default Socket Callbacks
2153 */
2154
2155static void sock_def_wakeup(struct sock *sk)
2156{
Eric Dumazet43815482010-04-29 11:01:49 +00002157 struct socket_wq *wq;
2158
2159 rcu_read_lock();
2160 wq = rcu_dereference(sk->sk_wq);
2161 if (wq_has_sleeper(wq))
2162 wake_up_interruptible_all(&wq->wait);
2163 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164}
2165
2166static void sock_def_error_report(struct sock *sk)
2167{
Eric Dumazet43815482010-04-29 11:01:49 +00002168 struct socket_wq *wq;
2169
2170 rcu_read_lock();
2171 wq = rcu_dereference(sk->sk_wq);
2172 if (wq_has_sleeper(wq))
2173 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002174 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002175 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176}
2177
2178static void sock_def_readable(struct sock *sk, int len)
2179{
Eric Dumazet43815482010-04-29 11:01:49 +00002180 struct socket_wq *wq;
2181
2182 rcu_read_lock();
2183 wq = rcu_dereference(sk->sk_wq);
2184 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002185 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002186 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002187 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002188 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189}
2190
2191static void sock_def_write_space(struct sock *sk)
2192{
Eric Dumazet43815482010-04-29 11:01:49 +00002193 struct socket_wq *wq;
2194
2195 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196
2197 /* Do not wake up a writer until he can make "significant"
2198 * progress. --DaveM
2199 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002200 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002201 wq = rcu_dereference(sk->sk_wq);
2202 if (wq_has_sleeper(wq))
2203 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002204 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
2206 /* Should agree with poll, otherwise some programs break */
2207 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002208 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 }
2210
Eric Dumazet43815482010-04-29 11:01:49 +00002211 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212}
2213
2214static void sock_def_destruct(struct sock *sk)
2215{
Jesper Juhla51482b2005-11-08 09:41:34 -08002216 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217}
2218
2219void sk_send_sigurg(struct sock *sk)
2220{
2221 if (sk->sk_socket && sk->sk_socket->file)
2222 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002223 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224}
Eric Dumazet2a915252009-05-27 11:30:05 +00002225EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226
2227void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2228 unsigned long expires)
2229{
2230 if (!mod_timer(timer, expires))
2231 sock_hold(sk);
2232}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233EXPORT_SYMBOL(sk_reset_timer);
2234
2235void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2236{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002237 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 __sock_put(sk);
2239}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240EXPORT_SYMBOL(sk_stop_timer);
2241
2242void sock_init_data(struct socket *sock, struct sock *sk)
2243{
2244 skb_queue_head_init(&sk->sk_receive_queue);
2245 skb_queue_head_init(&sk->sk_write_queue);
2246 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07002247#ifdef CONFIG_NET_DMA
2248 skb_queue_head_init(&sk->sk_async_wait_queue);
2249#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
2251 sk->sk_send_head = NULL;
2252
2253 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002254
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 sk->sk_allocation = GFP_KERNEL;
2256 sk->sk_rcvbuf = sysctl_rmem_default;
2257 sk->sk_sndbuf = sysctl_wmem_default;
2258 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002259 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
2261 sock_set_flag(sk, SOCK_ZAPPED);
2262
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002263 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002265 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 sock->sk = sk;
2267 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002268 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269
Eric Dumazetb6c67122010-04-08 23:03:29 +00002270 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002272 lockdep_set_class_and_name(&sk->sk_callback_lock,
2273 af_callback_keys + sk->sk_family,
2274 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
2276 sk->sk_state_change = sock_def_wakeup;
2277 sk->sk_data_ready = sock_def_readable;
2278 sk->sk_write_space = sock_def_write_space;
2279 sk->sk_error_report = sock_def_error_report;
2280 sk->sk_destruct = sock_def_destruct;
2281
Eric Dumazet5640f762012-09-23 23:04:42 +00002282 sk->sk_frag.page = NULL;
2283 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002284 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002286 sk->sk_peer_pid = NULL;
2287 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 sk->sk_write_pending = 0;
2289 sk->sk_rcvlowat = 1;
2290 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2291 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2292
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002293 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
Eliezer Tamir06021292013-06-10 11:39:50 +03002295#ifdef CONFIG_NET_LL_RX_POLL
2296 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002297 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002298#endif
2299
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002300 /*
2301 * Before updating sk_refcnt, we must commit prior changes to memory
2302 * (Documentation/RCU/rculist_nulls.txt for details)
2303 */
2304 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002306 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307}
Eric Dumazet2a915252009-05-27 11:30:05 +00002308EXPORT_SYMBOL(sock_init_data);
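/*
 * Illustrative sketch, not part of this file: after sock_init_data() a
 * protocol may override the default callbacks installed above, for example
 * to run its own work when data arrives.  The "my_*" callbacks are
 * hypothetical.
 *
 *	sock_init_data(sock, sk);
 *	sk->sk_data_ready  = my_data_ready;
 *	sk->sk_write_space = my_write_space;
 *	sk->sk_destruct    = my_destruct;
 */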
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002310void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311{
2312 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002313 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002314 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002316 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002317 spin_unlock(&sk->sk_lock.slock);
2318 /*
2319 * The sk_lock has mutex_lock() semantics here:
2320 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002321 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002322 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002324EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002326void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002328 /*
2329 * The sk_lock has mutex_unlock() semantics:
2330 */
2331 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2332
2333 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 if (sk->sk_backlog.tail)
2335 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002336
2337 if (sk->sk_prot->release_cb)
2338 sk->sk_prot->release_cb(sk);
2339
John Heffnerd2e91172007-09-12 10:44:19 +02002340 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002341 if (waitqueue_active(&sk->sk_lock.wq))
2342 wake_up(&sk->sk_lock.wq);
2343 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344}
2345EXPORT_SYMBOL(release_sock);
2346
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002347/**
2348 * lock_sock_fast - fast version of lock_sock
2349 * @sk: socket
2350 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path is taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path is taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
2356 */
2357bool lock_sock_fast(struct sock *sk)
2358{
2359 might_sleep();
2360 spin_lock_bh(&sk->sk_lock.slock);
2361
2362 if (!sk->sk_lock.owned)
2363 /*
2364 * Note : We must disable BH
2365 */
2366 return false;
2367
2368 __lock_sock(sk);
2369 sk->sk_lock.owned = 1;
2370 spin_unlock(&sk->sk_lock.slock);
2371 /*
2372 * The sk_lock has mutex_lock() semantics here:
2373 */
2374 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2375 local_bh_enable();
2376 return true;
2377}
2378EXPORT_SYMBOL(lock_sock_fast);
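/*
 * Illustrative sketch, not part of this file: the return value must be fed
 * back to unlock_sock_fast() so the matching unlock path (spin unlock vs.
 * release_sock()) is taken.
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short, non-blocking critical section ...
 *	unlock_sock_fast(sk, slow);
 */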
2379
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002381{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002382 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002384 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002385 tv = ktime_to_timeval(sk->sk_stamp);
2386 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002388 if (tv.tv_sec == 0) {
2389 sk->sk_stamp = ktime_get_real();
2390 tv = ktime_to_timeval(sk->sk_stamp);
2391 }
2392 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002393}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394EXPORT_SYMBOL(sock_get_timestamp);
2395
Eric Dumazetae40eb12007-03-18 17:33:16 -07002396int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2397{
2398 struct timespec ts;
2399 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002400 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002401 ts = ktime_to_timespec(sk->sk_stamp);
2402 if (ts.tv_sec == -1)
2403 return -ENOENT;
2404 if (ts.tv_sec == 0) {
2405 sk->sk_stamp = ktime_get_real();
2406 ts = ktime_to_timespec(sk->sk_stamp);
2407 }
2408 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2409}
2410EXPORT_SYMBOL(sock_get_timestampns);
2411
Patrick Ohly20d49472009-02-12 05:03:38 +00002412void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002413{
Patrick Ohly20d49472009-02-12 05:03:38 +00002414 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002415 unsigned long previous_flags = sk->sk_flags;
2416
Patrick Ohly20d49472009-02-12 05:03:38 +00002417 sock_set_flag(sk, flag);
2418 /*
2419 * we just set one of the two flags which require net
2420 * time stamping, but time stamping might have been on
2421 * already because of the other one
2422 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002423 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002424 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 }
2426}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
2428/*
 * Get a socket option on a socket.
2430 *
2431 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2432 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
2434 */
2435int sock_common_getsockopt(struct socket *sock, int level, int optname,
2436 char __user *optval, int __user *optlen)
2437{
2438 struct sock *sk = sock->sk;
2439
2440 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2441}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442EXPORT_SYMBOL(sock_common_getsockopt);
2443
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002444#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002445int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2446 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002447{
2448 struct sock *sk = sock->sk;
2449
Johannes Berg1e51f952007-03-06 13:44:06 -08002450 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002451 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2452 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002453 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2454}
2455EXPORT_SYMBOL(compat_sock_common_getsockopt);
2456#endif
2457
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2459 struct msghdr *msg, size_t size, int flags)
2460{
2461 struct sock *sk = sock->sk;
2462 int addr_len = 0;
2463 int err;
2464
2465 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2466 flags & ~MSG_DONTWAIT, &addr_len);
2467 if (err >= 0)
2468 msg->msg_namelen = addr_len;
2469 return err;
2470}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471EXPORT_SYMBOL(sock_common_recvmsg);
2472
2473/*
2474 * Set socket options on an inet socket.
2475 */
2476int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002477 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478{
2479 struct sock *sk = sock->sk;
2480
2481 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2482}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483EXPORT_SYMBOL(sock_common_setsockopt);
2484
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002485#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002486int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002487 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002488{
2489 struct sock *sk = sock->sk;
2490
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002491 if (sk->sk_prot->compat_setsockopt != NULL)
2492 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2493 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002494 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2495}
2496EXPORT_SYMBOL(compat_sock_common_setsockopt);
2497#endif
2498
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499void sk_common_release(struct sock *sk)
2500{
2501 if (sk->sk_prot->destroy)
2502 sk->sk_prot->destroy(sk);
2503
2504 /*
	 * Observation: when sk_common_release() is called, processes have
	 * no access to the socket, but the network stack still does.
2507 * Step one, detach it from networking:
2508 *
2509 * A. Remove from hash tables.
2510 */
2511
2512 sk->sk_prot->unhash(sk);
2513
2514 /*
	 * At this point the socket cannot receive new packets, but it is possible
	 * that some packets are in flight because some CPU ran the receiver and
	 * did the hash table lookup before we unhashed the socket. They will reach
	 * the receive queue and will be purged by the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and probably
	 * our own packets waiting in device queues. sock_destroy() will drain the
	 * receive queue, but transmitted packets will delay socket destruction
	 * until the last reference is released.
2524 */
2525
2526 sock_orphan(sk);
2527
2528 xfrm_sk_free_policy(sk);
2529
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002530 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00002531
2532 if (sk->sk_frag.page) {
2533 put_page(sk->sk_frag.page);
2534 sk->sk_frag.page = NULL;
2535 }
2536
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 sock_put(sk);
2538}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539EXPORT_SYMBOL(sk_common_release);
2540
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002541#ifdef CONFIG_PROC_FS
2542#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002543struct prot_inuse {
2544 int val[PROTO_INUSE_NR];
2545};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002546
2547static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002548
2549#ifdef CONFIG_NET_NS
2550void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2551{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002552 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002553}
2554EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2555
2556int sock_prot_inuse_get(struct net *net, struct proto *prot)
2557{
2558 int cpu, idx = prot->inuse_idx;
2559 int res = 0;
2560
2561 for_each_possible_cpu(cpu)
2562 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2563
2564 return res >= 0 ? res : 0;
2565}
2566EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2567
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002568static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002569{
2570 net->core.inuse = alloc_percpu(struct prot_inuse);
2571 return net->core.inuse ? 0 : -ENOMEM;
2572}
2573
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002574static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002575{
2576 free_percpu(net->core.inuse);
2577}
2578
2579static struct pernet_operations net_inuse_ops = {
2580 .init = sock_inuse_init_net,
2581 .exit = sock_inuse_exit_net,
2582};
2583
2584static __init int net_inuse_init(void)
2585{
2586 if (register_pernet_subsys(&net_inuse_ops))
2587 panic("Cannot initialize net inuse counters");
2588
2589 return 0;
2590}
2591
2592core_initcall(net_inuse_init);
2593#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002594static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2595
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002596void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002597{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002598 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002599}
2600EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2601
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002602int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002603{
2604 int cpu, idx = prot->inuse_idx;
2605 int res = 0;
2606
2607 for_each_possible_cpu(cpu)
2608 res += per_cpu(prot_inuse, cpu).val[idx];
2609
2610 return res >= 0 ? res : 0;
2611}
2612EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002613#endif
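/*
 * Illustrative sketch (not from this file): whichever variant above is
 * built, protocols are expected to add +1 when a socket enters their lookup
 * tables and -1 when it leaves, so that sock_prot_inuse_get() reports the
 * number of sockets currently in use.  The helpers below are hypothetical;
 * compare the hash/unhash callbacks of real protocols such as UDP.
 */
static void example_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's hash tables, then ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* ... remove sk from the hash tables, then ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}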
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002614
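/*
 * Each registered proto gets a private slot in the proto_inuse_idx bitmap;
 * that slot indexes the per-cpu val[] arrays above.  The last slot is never
 * handed out: it is the fallback index shared by protocols that register
 * after the bitmap is exhausted, and release_proto_idx() never clears it.
 */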
2615static void assign_proto_idx(struct proto *prot)
2616{
2617 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2618
2619 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002620 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002621 return;
2622 }
2623
2624 set_bit(prot->inuse_idx, proto_inuse_idx);
2625}
2626
2627static void release_proto_idx(struct proto *prot)
2628{
2629 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2630 clear_bit(prot->inuse_idx, proto_inuse_idx);
2631}
2632#else
2633static inline void assign_proto_idx(struct proto *prot)
2634{
2635}
2636
2637static inline void release_proto_idx(struct proto *prot)
2638{
2639}
2640#endif
2641
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642int proto_register(struct proto *prot, int alloc_slab)
2643{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644 if (alloc_slab) {
2645 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002646 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2647 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648
2649 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002650 pr_crit("%s: Can't create sock SLAB cache!\n",
2651 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002652 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002654
2655 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002656 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002657 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002658 goto out_free_sock_slab;
2659
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002660 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002661 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002662 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002663
2664 if (prot->rsk_prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002665 pr_crit("%s: Can't create request sock SLAB cache!\n",
2666 prot->name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002667 goto out_free_request_sock_slab_name;
2668 }
2669 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002670
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002671 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002672 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002673
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002674 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002675 goto out_free_request_sock_slab;
2676
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002677 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002678 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002679 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002680 0,
2681 SLAB_HWCACHE_ALIGN |
2682 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002683 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002684 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002685 goto out_free_timewait_sock_slab_name;
2686 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 }
2688
Glauber Costa36b77a52011-12-16 00:51:59 +00002689 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002691 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002692 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002693 return 0;
2694
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002695out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002696 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002697out_free_request_sock_slab:
2698 if (prot->rsk_prot && prot->rsk_prot->slab) {
2699 kmem_cache_destroy(prot->rsk_prot->slab);
2700 prot->rsk_prot->slab = NULL;
2701 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002702out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002703 if (prot->rsk_prot)
2704 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002705out_free_sock_slab:
2706 kmem_cache_destroy(prot->slab);
2707 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002708out:
2709 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711EXPORT_SYMBOL(proto_register);
2712
2713void proto_unregister(struct proto *prot)
2714{
Glauber Costa36b77a52011-12-16 00:51:59 +00002715 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002716 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002717 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002718 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719
2720 if (prot->slab != NULL) {
2721 kmem_cache_destroy(prot->slab);
2722 prot->slab = NULL;
2723 }
2724
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002725 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002726 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002727 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002728 prot->rsk_prot->slab = NULL;
2729 }
2730
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002731 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002732 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002733 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002734 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002735 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737EXPORT_SYMBOL(proto_unregister);
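/*
 * Illustrative sketch (not from this file): the minimal module-level pairing
 * of proto_register() and proto_unregister().  example_proto and its
 * init/exit functions are hypothetical; real protocols also fill in the
 * callbacks that /proc/net/protocols reports on below.
 */
static struct proto example_proto = {
	.name	  = "EXAMPLE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int __init example_proto_init(void)
{
	/* alloc_slab == 1 asks proto_register() to create the sock slab cache */
	return proto_register(&example_proto, 1);
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_proto);
}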
2738
2739#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002741 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742{
Glauber Costa36b77a52011-12-16 00:51:59 +00002743 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002744 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745}
2746
2747static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2748{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002749 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750}
2751
2752static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002753 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754{
Glauber Costa36b77a52011-12-16 00:51:59 +00002755 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756}
2757
2758static char proto_method_implemented(const void *method)
2759{
2760 return method == NULL ? 'n' : 'y';
2761}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002762static long sock_prot_memory_allocated(struct proto *proto)
2763{
Jeffrin Josecb75a362012-04-25 19:17:29 +05302764 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002765}
2766
2767static char *sock_prot_memory_pressure(struct proto *proto)
2768{
2769 return proto->memory_pressure != NULL ?
2770 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2771}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772
2773static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2774{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002775
Eric Dumazet8d987e52010-11-09 23:24:26 +00002776 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2778 proto->name,
2779 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002780 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00002781 sock_prot_memory_allocated(proto),
2782 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783 proto->max_header,
2784 proto->slab == NULL ? "no" : "yes",
2785 module_name(proto->owner),
2786 proto_method_implemented(proto->close),
2787 proto_method_implemented(proto->connect),
2788 proto_method_implemented(proto->disconnect),
2789 proto_method_implemented(proto->accept),
2790 proto_method_implemented(proto->ioctl),
2791 proto_method_implemented(proto->init),
2792 proto_method_implemented(proto->destroy),
2793 proto_method_implemented(proto->shutdown),
2794 proto_method_implemented(proto->setsockopt),
2795 proto_method_implemented(proto->getsockopt),
2796 proto_method_implemented(proto->sendmsg),
2797 proto_method_implemented(proto->recvmsg),
2798 proto_method_implemented(proto->sendpage),
2799 proto_method_implemented(proto->bind),
2800 proto_method_implemented(proto->backlog_rcv),
2801 proto_method_implemented(proto->hash),
2802 proto_method_implemented(proto->unhash),
2803 proto_method_implemented(proto->get_port),
2804 proto_method_implemented(proto->enter_memory_pressure));
2805}
2806
2807static int proto_seq_show(struct seq_file *seq, void *v)
2808{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002809 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2811 "protocol",
2812 "size",
2813 "sockets",
2814 "memory",
2815 "press",
2816 "maxhdr",
2817 "slab",
2818 "module",
2819 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2820 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002821 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 return 0;
2823}
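/*
 * The two-letter column codes printed in the header above map, in order, to
 * the proto_method_implemented() checks in proto_seq_printf():
 * cl=close, co=connect, di=disconnect, ac=accept, io=ioctl, in=init,
 * de=destroy, sh=shutdown, ss=setsockopt, gs=getsockopt, se=sendmsg,
 * re=recvmsg, sp=sendpage, bi=bind, br=backlog_rcv, ha=hash, uh=unhash,
 * gp=get_port, em=enter_memory_pressure.
 */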
2824
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002825static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 .start = proto_seq_start,
2827 .next = proto_seq_next,
2828 .stop = proto_seq_stop,
2829 .show = proto_seq_show,
2830};
2831
2832static int proto_seq_open(struct inode *inode, struct file *file)
2833{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002834 return seq_open_net(inode, file, &proto_seq_ops,
2835 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836}
2837
Arjan van de Ven9a321442007-02-12 00:55:35 -08002838static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 .owner = THIS_MODULE,
2840 .open = proto_seq_open,
2841 .read = seq_read,
2842 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002843 .release = seq_release_net,
2844};
2845
2846static __net_init int proto_init_net(struct net *net)
2847{
Gao fengd4beaa62013-02-18 01:34:54 +00002848 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
Eric Dumazet14e943d2008-11-19 15:14:01 -08002849 return -ENOMEM;
2850
2851 return 0;
2852}
2853
2854static __net_exit void proto_exit_net(struct net *net)
2855{
Gao fengece31ff2013-02-18 01:34:56 +00002856 remove_proc_entry("protocols", net->proc_net);
Eric Dumazet14e943d2008-11-19 15:14:01 -08002857}
2858
2859
2860static __net_initdata struct pernet_operations proto_net_ops = {
2861 .init = proto_init_net,
2862 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863};
2864
2865static int __init proto_init(void)
2866{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002867 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868}
2869
2870subsys_initcall(proto_init);
2871
2872#endif /* CONFIG_PROC_FS */