/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has the capability
 * @cap in the user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has the capability
 * @cap in all user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when the
 * socket was created and that the current process has the capability @cap
 * over the network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
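
/*
 * Illustrative sketch (not part of this file): a protocol handler would
 * typically gate a privileged operation on one of the helpers above,
 * using sk_net_capable() for per-network-namespace privilege. The option
 * handler name below is hypothetical.
 *
 *	static int proto_set_priv_option(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		sk->sk_priority = val;
 *		return 0;
 *	}
 */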

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
	"sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_KCM"      ,
	"slock-AF_QIPCRTR", "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
	"clock-AF_QIPCRTR", "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
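
/*
 * Illustrative sketch (not part of this file): a user such as swap over a
 * network filesystem would flip %SOCK_MEMALLOC around the lifetime of the
 * swapfile, so the transport socket may dip into emergency reserves only
 * while swap traffic depends on it. The function names are hypothetical.
 *
 *	static void netfs_swapfile_activate(struct sock *sk)
 *	{
 *		sk_set_memalloc(sk);
 *	}
 *
 *	static void netfs_swapfile_deactivate(struct sock *sk)
 *	{
 *		sk_clear_memalloc(sk);
 *	}
 */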
327
Mel Gormanb4b9e352012-07-31 16:44:26 -0700328int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
329{
330 int ret;
331 unsigned long pflags = current->flags;
332
333 /* these should have been dropped before queueing */
334 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
335
336 current->flags |= PF_MEMALLOC;
337 ret = sk->sk_backlog_rcv(sk, skb);
338 tsk_restore_flags(current, pflags, PF_MEMALLOC);
339
340 return ret;
341}
342EXPORT_SYMBOL(__sk_backlog_rcv);
343
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
345{
346 struct timeval tv;
347
348 if (optlen < sizeof(tv))
349 return -EINVAL;
350 if (copy_from_user(&tv, optval, sizeof(tv)))
351 return -EFAULT;
Vasily Averinba780732007-05-24 16:58:54 -0700352 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
353 return -EDOM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354
Vasily Averinba780732007-05-24 16:58:54 -0700355 if (tv.tv_sec < 0) {
Andrew Morton6f11df82007-07-09 13:16:00 -0700356 static int warned __read_mostly;
357
Vasily Averinba780732007-05-24 16:58:54 -0700358 *timeo_p = 0;
Ilpo Järvinen50aab542008-05-02 16:20:10 -0700359 if (warned < 10 && net_ratelimit()) {
Vasily Averinba780732007-05-24 16:58:54 -0700360 warned++;
Joe Perchese005d192012-05-16 19:58:40 +0000361 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
362 __func__, current->comm, task_pid_nr(current));
Ilpo Järvinen50aab542008-05-02 16:20:10 -0700363 }
Vasily Averinba780732007-05-24 16:58:54 -0700364 return 0;
365 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366 *timeo_p = MAX_SCHEDULE_TIMEOUT;
367 if (tv.tv_sec == 0 && tv.tv_usec == 0)
368 return 0;
369 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
370 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
371 return 0;
372}
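
/*
 * Worked example (illustrative only, assuming HZ == 250): a timeval of
 * { .tv_sec = 1, .tv_usec = 500000 } passed via SO_RCVTIMEO becomes
 *
 *	1 * 250 + (500000 + (4000 - 1)) / 4000 = 250 + 125 = 375 jiffies,
 *
 * i.e. the microsecond part is rounded up to the next clock tick, and a
 * zero timeval maps to MAX_SCHEDULE_TIMEOUT ("wait forever").
 */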

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
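
/*
 * Illustrative sketch (not part of this file): a datagram protocol's
 * receive path typically ends by handing the skb to the socket layer
 * through sock_queue_rcv_skb() and freeing it on error, since the
 * helpers above only consume the skb on success. The function name
 * below is hypothetical.
 *
 *	static int proto_deliver_one(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *
 *		if (err < 0)
 *			kfree_skb(skb);
 *		return err;
 *	}
 */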
454
Willem de Bruijn4f0c40d92016-07-12 18:18:57 -0400455int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
Eric Dumazetc3f24cf2016-11-02 17:14:41 -0700456 const int nested, unsigned int trim_cap, bool refcounted)
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800457{
458 int rc = NET_RX_SUCCESS;
459
Willem de Bruijn4f0c40d92016-07-12 18:18:57 -0400460 if (sk_filter_trim_cap(sk, skb, trim_cap))
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800461 goto discard_and_relse;
462
463 skb->dev = NULL;
464
Sorin Dumitru274f4822014-07-22 21:16:51 +0300465 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
Eric Dumazetc3774112010-04-27 15:13:20 -0700466 atomic_inc(&sk->sk_drops);
467 goto discard_and_relse;
468 }
Arnaldo Carvalho de Melo58a5a7b2006-11-16 14:06:06 -0200469 if (nested)
470 bh_lock_sock_nested(sk);
471 else
472 bh_lock_sock(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700473 if (!sock_owned_by_user(sk)) {
474 /*
475 * trylock + unlock semantics:
476 */
477 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
478
Peter Zijlstrac57943a2008-10-07 14:18:42 -0700479 rc = sk_backlog_rcv(sk, skb);
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700480
481 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
Eric Dumazetf545a382012-04-22 23:34:26 +0000482 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
Zhu Yi8eae9392010-03-04 18:01:40 +0000483 bh_unlock_sock(sk);
484 atomic_inc(&sk->sk_drops);
485 goto discard_and_relse;
486 }
487
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800488 bh_unlock_sock(sk);
489out:
Eric Dumazetc3f24cf2016-11-02 17:14:41 -0700490 if (refcounted)
491 sock_put(sk);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800492 return rc;
493discard_and_relse:
494 kfree_skb(skb);
495 goto out;
496}
Willem de Bruijn4f0c40d92016-07-12 18:18:57 -0400497EXPORT_SYMBOL(__sk_receive_skb);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800498
499struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
500{
Eric Dumazetb6c67122010-04-08 23:03:29 +0000501 struct dst_entry *dst = __sk_dst_get(sk);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800502
503 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
Krishna Kumare022f0b2009-10-19 23:46:20 +0000504 sk_tx_queue_clear(sk);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +0000505 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800506 dst_release(dst);
507 return NULL;
508 }
509
510 return dst;
511}
512EXPORT_SYMBOL(__sk_dst_check);
513
514struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
515{
516 struct dst_entry *dst = sk_dst_get(sk);
517
518 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
519 sk_dst_reset(sk);
520 dst_release(dst);
521 return NULL;
522 }
523
524 return dst;
525}
526EXPORT_SYMBOL(sk_dst_check);
527
Brian Haleyc91f6df2012-11-26 05:21:08 +0000528static int sock_setbindtodevice(struct sock *sk, char __user *optval,
529 int optlen)
David S. Miller48788092007-09-14 16:41:03 -0700530{
531 int ret = -ENOPROTOOPT;
532#ifdef CONFIG_NETDEVICES
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +0900533 struct net *net = sock_net(sk);
David S. Miller48788092007-09-14 16:41:03 -0700534 char devname[IFNAMSIZ];
535 int index;
536
537 /* Sorry... */
538 ret = -EPERM;
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000539 if (!ns_capable(net->user_ns, CAP_NET_RAW))
David S. Miller48788092007-09-14 16:41:03 -0700540 goto out;
541
542 ret = -EINVAL;
543 if (optlen < 0)
544 goto out;
545
546 /* Bind this socket to a particular device like "eth0",
547 * as specified in the passed interface name. If the
548 * name is "" or the option length is zero the socket
549 * is not bound.
550 */
551 if (optlen > IFNAMSIZ - 1)
552 optlen = IFNAMSIZ - 1;
553 memset(devname, 0, sizeof(devname));
554
555 ret = -EFAULT;
556 if (copy_from_user(devname, optval, optlen))
557 goto out;
558
David S. Miller000ba2e2009-11-05 22:37:11 -0800559 index = 0;
560 if (devname[0] != '\0') {
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800561 struct net_device *dev;
David S. Miller48788092007-09-14 16:41:03 -0700562
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800563 rcu_read_lock();
564 dev = dev_get_by_name_rcu(net, devname);
565 if (dev)
566 index = dev->ifindex;
567 rcu_read_unlock();
David S. Miller48788092007-09-14 16:41:03 -0700568 ret = -ENODEV;
569 if (!dev)
570 goto out;
David S. Miller48788092007-09-14 16:41:03 -0700571 }
572
573 lock_sock(sk);
574 sk->sk_bound_dev_if = index;
575 sk_dst_reset(sk);
576 release_sock(sk);
577
578 ret = 0;
579
580out:
581#endif
582
583 return ret;
584}
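
/*
 * Illustrative sketch (not part of this file): from user space the path
 * above is reached with a plain setsockopt() call, e.g.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * An empty name (or a zero option length) removes the binding; the caller
 * needs CAP_NET_RAW in the socket's network namespace.
 */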

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
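
/*
 * Illustrative sketch (not part of this file): the doubling in the
 * SO_RCVBUF case above is visible from user space, because getsockopt()
 * returns the value the kernel actually used. With fd an already-open
 * socket descriptor:
 *
 *	int req = 65536, got;
 *	socklen_t len = sizeof(got);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
 *
 * got now reads back as 131072, assuming 65536 <= sysctl_rmem_max.
 */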


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;
	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001399 sk->sk_family = family;
1400 /*
1401 * See comment in struct sock definition to understand
1402 * why we need sk_prot_creator -acme
1403 */
1404 sk->sk_prot = sk->sk_prot_creator = prot;
1405 sock_lock_init(sk);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001406 sk->sk_net_refcnt = kern ? 0 : 1;
1407 if (likely(sk->sk_net_refcnt))
1408 get_net(net);
1409 sock_net_set(sk, net);
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001410 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001411
Johannes Weiner2d758072016-10-07 17:00:58 -07001412 mem_cgroup_sk_alloc(sk);
Johannes Weinerd979a392016-09-19 14:44:38 -07001413 cgroup_sk_alloc(&sk->sk_cgrp_data);
Tejun Heo2a56a1f2015-12-07 17:38:52 -05001414 sock_update_classid(&sk->sk_cgrp_data);
1415 sock_update_netprioidx(&sk->sk_cgrp_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 }
Frank Filza79af592005-09-27 15:23:38 -07001417
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001418 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419}
Eric Dumazet2a915252009-05-27 11:30:05 +00001420EXPORT_SYMBOL(sk_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
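/*
 * Illustrative sketch (not from this file): the usual shape of a protocol
 * family ->create() handler that pairs sk_alloc() with sock_init_data()
 * (defined later in this file). "example_proto" and the PF_UNSPEC family
 * are placeholders; a real proto would also go through proto_register().
 */
static struct proto example_proto = {
	.name	  = "EXAMPLE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int example_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_UNSPEC, GFP_KERNEL, &example_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock_init_data(sock, sk);	/* attach sk to sock, set default callbacks */
	sk->sk_protocol = protocol;
	return 0;
}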
Eric Dumazeta4298e42016-04-01 08:52:12 -07001422/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1423 * grace period. This is the case for UDP sockets and TCP listeners.
1424 */
1425static void __sk_destruct(struct rcu_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426{
Eric Dumazeta4298e42016-04-01 08:52:12 -07001427 struct sock *sk = container_of(head, struct sock, sk_rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430 if (sk->sk_destruct)
1431 sk->sk_destruct(sk);
1432
Paul E. McKenneya898def2010-02-22 17:04:49 -08001433 filter = rcu_dereference_check(sk->sk_filter,
1434 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001436 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001437 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 }
Craig Gallek538950a2016-01-04 17:41:47 -05001439 if (rcu_access_pointer(sk->sk_reuseport_cb))
1440 reuseport_detach_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
Eric Dumazet08e29af2011-11-28 12:04:18 +00001442 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
1444 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001445 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1446 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
Eric Dumazete9c1b1a2017-03-15 13:21:28 -07001448 if (sk->sk_frag.page) {
1449 put_page(sk->sk_frag.page);
1450 sk->sk_frag.page = NULL;
1451 }
1452
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001453 if (sk->sk_peer_cred)
1454 put_cred(sk->sk_peer_cred);
1455 put_pid(sk->sk_peer_pid);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001456 if (likely(sk->sk_net_refcnt))
1457 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001458 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001460
Eric Dumazeta4298e42016-04-01 08:52:12 -07001461void sk_destruct(struct sock *sk)
1462{
1463 if (sock_flag(sk, SOCK_RCU_FREE))
1464 call_rcu(&sk->sk_rcu, __sk_destruct);
1465 else
1466 __sk_destruct(&sk->sk_rcu);
1467}
1468
Craig Gallekeb4cb002015-06-15 11:26:18 -04001469static void __sk_free(struct sock *sk)
1470{
Craig Gallekb9226222015-06-30 12:49:32 -04001471 if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
Craig Gallekeb4cb002015-06-15 11:26:18 -04001472 sock_diag_broadcast_destroy(sk);
1473 else
1474 sk_destruct(sk);
1475}
1476
Eric Dumazet2b85a342009-06-11 02:55:43 -07001477void sk_free(struct sock *sk)
1478{
1479 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001480	 * We subtract one from sk_wmem_alloc, so we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001481	 * some packets are still in some tx queue.
1482	 * If the count is not zero, sock_wfree() will call __sk_free(sk) later.
1483 */
1484 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1485 __sk_free(sk);
1486}
Eric Dumazet2a915252009-05-27 11:30:05 +00001487EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488
Eric Dumazete56c57d2011-11-08 17:07:07 -05001489/**
1490 * sk_clone_lock - clone a socket, and lock its clone
1491 * @sk: the socket to clone
1492 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1493 *
1494 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1495 */
1496struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001497{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001498 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001499 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001500
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001501 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001502 if (newsk != NULL) {
1503 struct sk_filter *filter;
1504
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001505 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001506
1507 /* SANITY */
Sowmini Varadhan8a681732015-07-30 15:50:36 +02001508 if (likely(newsk->sk_net_refcnt))
1509 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001510 sk_node_init(&newsk->sk_node);
1511 sock_lock_init(newsk);
1512 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001513 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001514 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001515
1516 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001517 /*
1518 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1519 */
1520 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001521 atomic_set(&newsk->sk_omem_alloc, 0);
1522 skb_queue_head_init(&newsk->sk_receive_queue);
1523 skb_queue_head_init(&newsk->sk_write_queue);
1524
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001525 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001526 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1527 af_callback_keys + newsk->sk_family,
1528 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001529
1530 newsk->sk_dst_cache = NULL;
1531 newsk->sk_wmem_queued = 0;
1532 newsk->sk_forward_alloc = 0;
Eric Dumazet9caad862016-04-01 08:52:20 -07001533 atomic_set(&newsk->sk_drops, 0);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001534 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001535 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1536
1537 sock_reset_flag(newsk, SOCK_DONE);
1538 skb_queue_head_init(&newsk->sk_error_queue);
1539
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001540 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001541 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001542 /* though it's an empty new sock, the charging may fail
1543 * if sysctl_optmem_max was changed between creation of
1544 * original socket and cloning
1545 */
1546 is_charged = sk_filter_charge(newsk, filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001547
Eric Dumazetd188ba82015-12-08 07:22:02 -08001548 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
Daniel Borkmanna53ea602017-03-22 13:08:08 +01001549 /* We need to make sure that we don't uncharge the new
1550 * socket if we couldn't charge it in the first place
1551 * as otherwise we uncharge the parent's filter.
1552 */
1553 if (!is_charged)
1554 RCU_INIT_POINTER(newsk->sk_filter, NULL);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001555		/* It is still a raw copy of the parent, so invalidate
1556		 * its destructor and use plain sk_free() */
1557 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001558 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001559 sk_free(newsk);
1560 newsk = NULL;
1561 goto out;
1562 }
Craig Gallekfa463492016-02-10 11:50:39 -05001563 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001564
1565 newsk->sk_err = 0;
Eric Dumazete551c322016-10-28 13:40:24 -07001566 newsk->sk_err_soft = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001567 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001568 newsk->sk_incoming_cpu = raw_smp_processor_id();
Eric Dumazet33cf7c92015-03-11 18:53:14 -07001569 atomic64_set(&newsk->sk_cookie, 0);
Johannes Weinerd979a392016-09-19 14:44:38 -07001570
Johannes Weiner2d758072016-10-07 17:00:58 -07001571 mem_cgroup_sk_alloc(newsk);
Johannes Weinerd979a392016-09-19 14:44:38 -07001572 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1573
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001574 /*
1575 * Before updating sk_refcnt, we must commit prior changes to memory
1576 * (Documentation/RCU/rculist_nulls.txt for details)
1577 */
1578 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001579 atomic_set(&newsk->sk_refcnt, 2);
1580
1581 /*
1582 * Increment the counter in the same struct proto as the master
1583 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1584 * is the same as sk->sk_prot->socks, as this field was copied
1585 * with memcpy).
1586 *
1587 * This _changes_ the previous behaviour, where
1588 * tcp_create_openreq_child always was incrementing the
1589 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1590 * to be taken into account in all callers. -acme
1591 */
1592 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001593 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001594 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001595
1596 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001597 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001598
Hannes Frederic Sowa080a2702015-10-26 13:51:37 +01001599 if (sock_needs_netstamp(sk) &&
1600 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001601 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001602 }
1603out:
1604 return newsk;
1605}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001606EXPORT_SYMBOL_GPL(sk_clone_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001607
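/*
 * Illustrative sketch (not from this file): per the kerneldoc above, every
 * sk_clone_lock() caller owns the bh lock on the clone and must release it
 * itself, which is what connection-oriented protocols do when minting a
 * child socket from a listener.
 */
static struct sock *example_clone(const struct sock *parent)
{
	struct sock *child = sk_clone_lock(parent, GFP_ATOMIC);

	if (!child)
		return NULL;

	/* ... protocol-specific setup of the still-locked child ... */

	bh_unlock_sock(child);		/* required even on the success path */
	return child;
}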
Andi Kleen99580892007-04-20 17:12:43 -07001608void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1609{
Eric Dumazetd6a4e262015-05-26 08:55:28 -07001610 u32 max_segs = 1;
1611
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001612 sk_dst_set(sk, dst);
Andi Kleen99580892007-04-20 17:12:43 -07001613 sk->sk_route_caps = dst->dev->features;
1614 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001615 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001616 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001617 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001618 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001619 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001620 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001621 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001622 sk->sk_gso_max_size = dst->dev->gso_max_size;
Eric Dumazetd6a4e262015-05-26 08:55:28 -07001623 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001624 }
Andi Kleen99580892007-04-20 17:12:43 -07001625 }
Eric Dumazetd6a4e262015-05-26 08:55:28 -07001626 sk->sk_gso_max_segs = max_segs;
Andi Kleen99580892007-04-20 17:12:43 -07001627}
1628EXPORT_SYMBOL_GPL(sk_setup_caps);
1629
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630/*
1631 * Simple resource managers for sockets.
1632 */
1633
1634
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001635/*
1636 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 */
1638void sock_wfree(struct sk_buff *skb)
1639{
1640 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001641 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
Eric Dumazetd99927f2009-09-24 10:49:24 +00001643 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1644 /*
1645		 * Keep a reference on sk_wmem_alloc; it will be released
1646		 * after the sk_write_space() call.
1647 */
1648 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001650 len = 1;
1651 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001652 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001653 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1654 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001655 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001656 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001657 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658}
Eric Dumazet2a915252009-05-27 11:30:05 +00001659EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
Eric Dumazet1d2077a2016-05-02 10:56:27 -07001661/* This variant of sock_wfree() is used by TCP,
1662 * since it sets SOCK_USE_WRITE_QUEUE.
1663 */
1664void __sock_wfree(struct sk_buff *skb)
1665{
1666 struct sock *sk = skb->sk;
1667
1668 if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1669 __sk_free(sk);
1670}
1671
Eric Dumazet9e17f8a2015-11-01 15:36:55 -08001672void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1673{
1674 skb_orphan(skb);
1675 skb->sk = sk;
1676#ifdef CONFIG_INET
1677 if (unlikely(!sk_fullsock(sk))) {
1678 skb->destructor = sock_edemux;
1679 sock_hold(sk);
1680 return;
1681 }
1682#endif
1683 skb->destructor = sock_wfree;
1684 skb_set_hash_from_sk(skb, sk);
1685 /*
1686	 * We used to take a refcount on sk, but the following operation
1687	 * is enough to guarantee sk_free() won't free this sock until
1688	 * all in-flight packets are completed.
1689 */
1690 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1691}
1692EXPORT_SYMBOL(skb_set_owner_w);
1693
Eric Dumazet1d2077a2016-05-02 10:56:27 -07001694/* This helper is used by netem, as it can hold packets in its
1695 * delay queue. We want to allow the owner socket to send more
1696 * packets, as if they were already TX completed by a typical driver.
1697 * But we also want to keep skb->sk set because some packet schedulers
1698 * rely on it (sch_fq for example). So we set skb->truesize to a small
1699 * amount (1) and decrease sk_wmem_alloc accordingly.
1700 */
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001701void skb_orphan_partial(struct sk_buff *skb)
1702{
Eric Dumazet1d2077a2016-05-02 10:56:27 -07001703 /* If this skb is a TCP pure ACK or already went here,
1704 * we have nothing to do. 2 is already a very small truesize.
1705 */
1706 if (skb->truesize <= 2)
1707 return;
1708
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001709	/* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1710	 * so we do not completely orphan the skb, but transfer all
1711	 * accounted bytes but one, to avoid unexpected reorders.
1712 */
1713 if (skb->destructor == sock_wfree
1714#ifdef CONFIG_INET
1715 || skb->destructor == tcp_wfree
1716#endif
1717 ) {
1718 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1719 skb->truesize = 1;
1720 } else {
1721 skb_orphan(skb);
1722 }
1723}
1724EXPORT_SYMBOL(skb_orphan_partial);
1725
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001726/*
1727 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 */
1729void sock_rfree(struct sk_buff *skb)
1730{
1731 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001732 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
Eric Dumazetd361fd52010-07-10 22:45:17 +00001734 atomic_sub(len, &sk->sk_rmem_alloc);
1735 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736}
Eric Dumazet2a915252009-05-27 11:30:05 +00001737EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Oliver Hartkopp7768eed2015-03-10 19:03:46 +01001739/*
1740 * Buffer destructor for skbs that are not used directly in read or write
1741 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1742 */
Alexander Duyck62bccb82014-09-04 13:31:35 -04001743void sock_efree(struct sk_buff *skb)
1744{
1745 sock_put(skb->sk);
1746}
1747EXPORT_SYMBOL(sock_efree);
1748
Eric W. Biederman976d02012012-05-23 17:16:53 -06001749kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001751 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Eric Dumazetf064af12010-09-22 12:43:39 +00001753 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001754 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001755 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 return uid;
1757}
Eric Dumazet2a915252009-05-27 11:30:05 +00001758EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
1760unsigned long sock_i_ino(struct sock *sk)
1761{
1762 unsigned long ino;
1763
Eric Dumazetf064af12010-09-22 12:43:39 +00001764 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001766 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 return ino;
1768}
Eric Dumazet2a915252009-05-27 11:30:05 +00001769EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771/*
1772 * Allocate a skb from the socket's send buffer.
1773 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001774struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001775 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
1777 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001778 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 if (skb) {
1780 skb_set_owner_w(skb, sk);
1781 return skb;
1782 }
1783 }
1784 return NULL;
1785}
Eric Dumazet2a915252009-05-27 11:30:05 +00001786EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787
1788/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001790 */
Al Virodd0fc662005-10-07 07:46:04 +01001791void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792{
Eric Dumazet95c96172012-04-15 05:58:06 +00001793 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1795 void *mem;
1796 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001797 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 */
1799 atomic_add(size, &sk->sk_omem_alloc);
1800 mem = kmalloc(size, priority);
1801 if (mem)
1802 return mem;
1803 atomic_sub(size, &sk->sk_omem_alloc);
1804 }
1805 return NULL;
1806}
Eric Dumazet2a915252009-05-27 11:30:05 +00001807EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
Daniel Borkmann79e88652014-11-19 17:13:11 +01001809/* Free an option memory block. Note, we actually want the inline
1810 * here as this allows gcc to detect the nullify and fold away the
1811 * condition entirely.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 */
Daniel Borkmann79e88652014-11-19 17:13:11 +01001813static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1814 const bool nullify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815{
David S. Millere53da5f2014-10-14 17:02:37 -04001816 if (WARN_ON_ONCE(!mem))
1817 return;
Daniel Borkmann79e88652014-11-19 17:13:11 +01001818 if (nullify)
1819 kzfree(mem);
1820 else
1821 kfree(mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 atomic_sub(size, &sk->sk_omem_alloc);
1823}
Daniel Borkmann79e88652014-11-19 17:13:11 +01001824
1825void sock_kfree_s(struct sock *sk, void *mem, int size)
1826{
1827 __sock_kfree_s(sk, mem, size, false);
1828}
Eric Dumazet2a915252009-05-27 11:30:05 +00001829EXPORT_SYMBOL(sock_kfree_s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
Daniel Borkmann79e88652014-11-19 17:13:11 +01001831void sock_kzfree_s(struct sock *sk, void *mem, int size)
1832{
1833 __sock_kfree_s(sk, mem, size, true);
1834}
1835EXPORT_SYMBOL(sock_kzfree_s);
1836
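/*
 * Illustrative sketch (not from this file): option memory is charged to
 * sk_omem_alloc, so the size passed to sock_kzfree_s()/sock_kfree_s() must
 * match the size given to sock_kmalloc(). The helper name and its 512-byte
 * cap are invented for the example.
 */
static int example_set_secret(struct sock *sk, char __user *optval, int optlen)
{
	u8 *secret;

	if (optlen <= 0 || optlen > 512)
		return -EINVAL;

	secret = sock_kmalloc(sk, optlen, GFP_KERNEL);
	if (!secret)
		return -ENOBUFS;

	if (copy_from_user(secret, optval, optlen)) {
		sock_kzfree_s(sk, secret, optlen);	/* zero, then uncharge */
		return -EFAULT;
	}

	/* ... hand the secret to the protocol ... */
	sock_kzfree_s(sk, secret, optlen);
	return 0;
}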
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1838   I think these locks should be removed for datagram sockets.
1839 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001840static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841{
1842 DEFINE_WAIT(wait);
1843
Eric Dumazet9cd3e072015-11-29 20:03:10 -08001844 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 for (;;) {
1846 if (!timeo)
1847 break;
1848 if (signal_pending(current))
1849 break;
1850 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001851 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1853 break;
1854 if (sk->sk_shutdown & SEND_SHUTDOWN)
1855 break;
1856 if (sk->sk_err)
1857 break;
1858 timeo = schedule_timeout(timeo);
1859 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001860 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 return timeo;
1862}
1863
1864
1865/*
1866 * Generic send/receive buffer handlers
1867 */
1868
Herbert Xu4cc7f682009-02-04 16:55:54 -08001869struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1870 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07001871 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001873 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 long timeo;
1875 int err;
1876
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001878 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 err = sock_error(sk);
1880 if (err != 0)
1881 goto failure;
1882
1883 err = -EPIPE;
1884 if (sk->sk_shutdown & SEND_SHUTDOWN)
1885 goto failure;
1886
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001887 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1888 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07001889
Eric Dumazet9cd3e072015-11-29 20:03:10 -08001890 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001891 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1892 err = -EAGAIN;
1893 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001895 if (signal_pending(current))
1896 goto interrupted;
1897 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001899 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1900 errcode, sk->sk_allocation);
1901 if (skb)
1902 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 return skb;
1904
1905interrupted:
1906 err = sock_intr_errno(timeo);
1907failure:
1908 *errcode = err;
1909 return NULL;
1910}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001911EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001913struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 int noblock, int *errcode)
1915{
Eric Dumazet28d64272013-08-08 14:38:47 -07001916 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917}
Eric Dumazet2a915252009-05-27 11:30:05 +00001918EXPORT_SYMBOL(sock_alloc_send_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
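/*
 * Illustrative sketch (not from this file): the common datagram sendmsg
 * shape around sock_alloc_send_skb(), which blocks (bounded by the send
 * timeout) until sk_wmem_alloc permits the allocation. The MAX_HEADER
 * reserve is a placeholder for whatever headroom the protocol needs.
 */
static int example_xmit(struct sock *sk, struct msghdr *msg, size_t len,
			int noblock)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len + MAX_HEADER, noblock, &err);
	if (!skb)
		return err;

	skb_reserve(skb, MAX_HEADER);
	err = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/* ... queue skb for transmit; its destructor is already sock_wfree ... */
	return len;
}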
Willem de Bruijn39771b12016-04-02 23:08:06 -04001920int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1921 struct sockcm_cookie *sockc)
1922{
Soheil Hassas Yeganeh3dd17e62016-04-02 23:08:09 -04001923 u32 tsflags;
1924
Willem de Bruijn39771b12016-04-02 23:08:06 -04001925 switch (cmsg->cmsg_type) {
1926 case SO_MARK:
1927 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1928 return -EPERM;
1929 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1930 return -EINVAL;
1931 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
1932 break;
Soheil Hassas Yeganeh3dd17e62016-04-02 23:08:09 -04001933 case SO_TIMESTAMPING:
1934 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1935 return -EINVAL;
1936
1937 tsflags = *(u32 *)CMSG_DATA(cmsg);
1938 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
1939 return -EINVAL;
1940
1941 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
1942 sockc->tsflags |= tsflags;
1943 break;
Soheil Hassas Yeganeh779f1ed2016-07-11 16:51:26 -04001944 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
1945 case SCM_RIGHTS:
1946 case SCM_CREDENTIALS:
1947 break;
Willem de Bruijn39771b12016-04-02 23:08:06 -04001948 default:
1949 return -EINVAL;
1950 }
1951 return 0;
1952}
1953EXPORT_SYMBOL(__sock_cmsg_send);
1954
Edward Jeef28ea362015-10-08 14:56:48 -07001955int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1956 struct sockcm_cookie *sockc)
1957{
1958 struct cmsghdr *cmsg;
Willem de Bruijn39771b12016-04-02 23:08:06 -04001959 int ret;
Edward Jeef28ea362015-10-08 14:56:48 -07001960
1961 for_each_cmsghdr(cmsg, msg) {
1962 if (!CMSG_OK(msg, cmsg))
1963 return -EINVAL;
1964 if (cmsg->cmsg_level != SOL_SOCKET)
1965 continue;
Willem de Bruijn39771b12016-04-02 23:08:06 -04001966 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
1967 if (ret)
1968 return ret;
Edward Jeef28ea362015-10-08 14:56:48 -07001969 }
1970 return 0;
1971}
1972EXPORT_SYMBOL(sock_cmsg_send);
1973
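/*
 * Illustrative sketch (not from this file): a sendmsg implementation
 * typically seeds a sockcm_cookie from the per-socket defaults and then
 * lets SOL_SOCKET control messages override it, mirroring what the IPv4
 * and IPv6 output paths do. The helper name is invented for the example.
 */
static int example_parse_cmsgs(struct sock *sk, struct msghdr *msg,
			       struct sockcm_cookie *sockc)
{
	int err;

	sockc->mark = sk->sk_mark;		/* per-socket defaults first */
	sockc->tsflags = sk->sk_tsflags;

	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, sockc);
		if (err)
			return err;
	}
	/* sockc->mark and sockc->tsflags now reflect any SO_MARK or
	 * SO_TIMESTAMPING control message supplied by the sender.
	 */
	return 0;
}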
Eric Dumazet5640f762012-09-23 23:04:42 +00001974/* On 32bit arches, an skb frag is limited to 2^15 */
1975#define SKB_FRAG_PAGE_ORDER get_order(32768)
1976
Eric Dumazet400dfd32013-10-17 16:27:07 -07001977/**
1978 * skb_page_frag_refill - check that a page_frag contains enough room
1979 * @sz: minimum size of the fragment we want to get
1980 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07001981 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07001982 *
1983 * Note: While this allocator tries to use high order pages, there is
1984 * no guarantee that allocations succeed. Therefore, @sz MUST be
1985 * less than or equal to PAGE_SIZE.
1986 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07001987bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00001988{
Eric Dumazet5640f762012-09-23 23:04:42 +00001989 if (pfrag->page) {
Joonsoo Kimfe896d12016-03-17 14:19:26 -07001990 if (page_ref_count(pfrag->page) == 1) {
Eric Dumazet5640f762012-09-23 23:04:42 +00001991 pfrag->offset = 0;
1992 return true;
1993 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001994 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00001995 return true;
1996 put_page(pfrag->page);
1997 }
1998
Eric Dumazetd9b29382014-08-27 20:49:34 -07001999 pfrag->offset = 0;
2000 if (SKB_FRAG_PAGE_ORDER) {
Mel Gormand0164ad2015-11-06 16:28:21 -08002001 /* Avoid direct reclaim but allow kswapd to wake */
2002 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2003 __GFP_COMP | __GFP_NOWARN |
2004 __GFP_NORETRY,
Eric Dumazetd9b29382014-08-27 20:49:34 -07002005 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00002006 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07002007 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00002008 return true;
2009 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07002010 }
2011 pfrag->page = alloc_page(gfp);
2012 if (likely(pfrag->page)) {
2013 pfrag->size = PAGE_SIZE;
2014 return true;
2015 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07002016 return false;
2017}
2018EXPORT_SYMBOL(skb_page_frag_refill);
2019
2020bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2021{
2022 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2023 return true;
2024
Eric Dumazet5640f762012-09-23 23:04:42 +00002025 sk_enter_memory_pressure(sk);
2026 sk_stream_moderate_sndbuf(sk);
2027 return false;
2028}
2029EXPORT_SYMBOL(sk_page_frag_refill);
2030
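/*
 * Illustrative sketch (not from this file): the copy-into-page-frag pattern
 * used by stream protocols. sk_page_frag() selects the per-task or the
 * per-socket frag depending on sk_allocation; attaching the bytes to an skb
 * frag is elided here.
 */
static int example_append_to_frag(struct sock *sk, struct msghdr *msg, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;		/* memory pressure handling already ran */

	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	if (copy_from_iter(page_address(pfrag->page) + pfrag->offset,
			   copy, &msg->msg_iter) != copy)
		return -EFAULT;

	/* ... reference pfrag->page from an skb frag for these bytes ... */
	pfrag->offset += copy;
	return copy;
}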
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00002032 __releases(&sk->sk_lock.slock)
2033 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034{
2035 DEFINE_WAIT(wait);
2036
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002037 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2039 TASK_UNINTERRUPTIBLE);
2040 spin_unlock_bh(&sk->sk_lock.slock);
2041 schedule();
2042 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002043 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 break;
2045 }
2046 finish_wait(&sk->sk_lock.wq, &wait);
2047}
2048
2049static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00002050 __releases(&sk->sk_lock.slock)
2051 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002053 struct sk_buff *skb, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002055 while ((skb = sk->sk_backlog.head) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002057
2058 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
2060 do {
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002061 next = skb->next;
Eric Dumazete4cbb022012-04-30 16:07:09 +00002062 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00002063 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07002065 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002067 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
2069 skb = next;
2070 } while (skb != NULL);
2071
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002072 spin_lock_bh(&sk->sk_lock.slock);
2073 }
Zhu Yi8eae9392010-03-04 18:01:40 +00002074
2075 /*
2076	 * Doing the zeroing here guarantees we cannot loop forever
2077 * while a wild producer attempts to flood us.
2078 */
2079 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080}
2081
Eric Dumazetd41a69f2016-04-29 14:16:53 -07002082void __sk_flush_backlog(struct sock *sk)
2083{
2084 spin_lock_bh(&sk->sk_lock.slock);
2085 __release_sock(sk);
2086 spin_unlock_bh(&sk->sk_lock.slock);
2087}
2088
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089/**
2090 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07002091 * @sk: sock to wait on
2092 * @timeo: for how long
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002093 * @skb: last skb seen on sk_receive_queue
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 *
2095 * Now socket state including sk->sk_err is changed only under lock,
2096 * hence we may omit checks after joining wait queue.
2097 * We check the receive queue before schedule() only as an optimization;
2098 * it is very likely that release_sock() added new data.
2099 */
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002100int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101{
2102 int rc;
2103 DEFINE_WAIT(wait);
2104
Eric Dumazetaa395142010-04-20 13:03:51 +00002105 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002106 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002107 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002108 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
Eric Dumazetaa395142010-04-20 13:03:51 +00002109 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 return rc;
2111}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112EXPORT_SYMBOL(sk_wait_data);
2113
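/*
 * Illustrative sketch (not from this file): the classic blocking receive
 * loop built on sk_wait_data(); recvmsg paths run it with the socket owned,
 * between lock_sock() and release_sock(). The helper name is invented.
 */
static struct sk_buff *example_wait_for_skb(struct sock *sk, int noblock,
					    int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		*err = sock_error(sk);
		if (*err)
			return NULL;
		if (!timeo) {
			*err = -EAGAIN;
			return NULL;
		}
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
		sk_wait_data(sk, &timeo, NULL);
	}
	return skb;
}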
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002114/**
2115 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2116 * @sk: socket
2117 * @size: memory size to allocate
2118 * @kind: allocation type
2119 *
2120 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2121 * rmem allocation. This function assumes that protocols which have
2122 * memory_pressure use sk_wmem_queued as write buffer accounting.
2123 */
2124int __sk_mem_schedule(struct sock *sk, int size, int kind)
2125{
2126 struct proto *prot = sk->sk_prot;
2127 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00002128 long allocated;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002129
2130 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002131
Johannes Weinere8056052016-01-14 15:21:14 -08002132 allocated = sk_memory_allocated_add(sk, amt);
2133
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002134 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2135 !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
Johannes Weinere8056052016-01-14 15:21:14 -08002136 goto suppress_allocation;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002137
2138 /* Under limit. */
Johannes Weinere8056052016-01-14 15:21:14 -08002139 if (allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00002140 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002141 return 1;
2142 }
2143
Johannes Weinere8056052016-01-14 15:21:14 -08002144 /* Under pressure. */
2145 if (allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00002146 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002147
Johannes Weinere8056052016-01-14 15:21:14 -08002148 /* Over hard limit. */
2149 if (allocated > sk_prot_mem_limits(sk, 2))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002150 goto suppress_allocation;
2151
2152 /* guarantee minimum buffer size under pressure */
2153 if (kind == SK_MEM_RECV) {
2154 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2155 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002156
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002157 } else { /* SK_MEM_SEND */
2158 if (sk->sk_type == SOCK_STREAM) {
2159 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2160 return 1;
2161 } else if (atomic_read(&sk->sk_wmem_alloc) <
2162 prot->sysctl_wmem[0])
2163 return 1;
2164 }
2165
Glauber Costa180d8cd2011-12-11 21:47:02 +00002166 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08002167 int alloc;
2168
Glauber Costa180d8cd2011-12-11 21:47:02 +00002169 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002170 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002171 alloc = sk_sockets_allocated_read_positive(sk);
2172 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002173 sk_mem_pages(sk->sk_wmem_queued +
2174 atomic_read(&sk->sk_rmem_alloc) +
2175 sk->sk_forward_alloc))
2176 return 1;
2177 }
2178
2179suppress_allocation:
2180
2181 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2182 sk_stream_moderate_sndbuf(sk);
2183
2184 /* Fail only if socket is _under_ its sndbuf.
2185		 * In this case we cannot block, so we have to fail.
2186 */
2187 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2188 return 1;
2189 }
2190
Satoru Moriya3847ce32011-06-17 12:00:03 +00002191 trace_sock_exceed_buf_limit(sk, prot, allocated);
2192
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002193 /* Alas. Undo changes. */
2194 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002195
Glauber Costa0e90b312012-01-20 04:57:16 +00002196 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002197
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002198 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2199 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
Johannes Weinere8056052016-01-14 15:21:14 -08002200
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002201 return 0;
2202}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002203EXPORT_SYMBOL(__sk_mem_schedule);
2204
2205/**
Jean Sacren69dba9b2015-08-27 18:05:49 -06002206 * __sk_mem_reclaim - reclaim memory_allocated
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002207 * @sk: socket
Eric Dumazet1a24e042015-05-15 12:39:25 -07002208 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002209 */
Eric Dumazet1a24e042015-05-15 12:39:25 -07002210void __sk_mem_reclaim(struct sock *sk, int amount)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002211{
Eric Dumazet1a24e042015-05-15 12:39:25 -07002212 amount >>= SK_MEM_QUANTUM_SHIFT;
2213 sk_memory_allocated_sub(sk, amount);
2214 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002215
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002216 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2217 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
Johannes Weinere8056052016-01-14 15:21:14 -08002218
Glauber Costa180d8cd2011-12-11 21:47:02 +00002219 if (sk_under_memory_pressure(sk) &&
2220 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2221 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002222}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002223EXPORT_SYMBOL(__sk_mem_reclaim);
2224
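/*
 * Illustrative sketch (not from this file): protocols normally reach
 * __sk_mem_schedule()/__sk_mem_reclaim() through wrappers such as
 * sk_rmem_schedule(), e.g. when deciding whether an incoming skb may be
 * charged to the receive buffer. The helper name is invented.
 */
static int example_charge_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;	/* __sk_mem_schedule() refused the pages */

	skb_set_owner_r(skb, sk);	/* charges sk_rmem_alloc, consumes forward_alloc */
	skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}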
samanthakumar627d2d62016-04-05 12:41:16 -04002225int sk_set_peek_off(struct sock *sk, int val)
2226{
2227 if (val < 0)
2228 return -EINVAL;
2229
2230 sk->sk_peek_off = val;
2231 return 0;
2232}
2233EXPORT_SYMBOL_GPL(sk_set_peek_off);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002234
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235/*
2236 * Set of default routines for initialising struct proto_ops when
2237 * the protocol does not support a particular function. In certain
2238 * cases where it makes no sense for a protocol to have a "do nothing"
2239 * function, some default processing is provided.
2240 */
2241
2242int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2243{
2244 return -EOPNOTSUPP;
2245}
Eric Dumazet2a915252009-05-27 11:30:05 +00002246EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002248int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 int len, int flags)
2250{
2251 return -EOPNOTSUPP;
2252}
Eric Dumazet2a915252009-05-27 11:30:05 +00002253EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254
2255int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2256{
2257 return -EOPNOTSUPP;
2258}
Eric Dumazet2a915252009-05-27 11:30:05 +00002259EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
2261int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2262{
2263 return -EOPNOTSUPP;
2264}
Eric Dumazet2a915252009-05-27 11:30:05 +00002265EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002267int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 int *len, int peer)
2269{
2270 return -EOPNOTSUPP;
2271}
Eric Dumazet2a915252009-05-27 11:30:05 +00002272EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
Eric Dumazet2a915252009-05-27 11:30:05 +00002274unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275{
2276 return 0;
2277}
Eric Dumazet2a915252009-05-27 11:30:05 +00002278EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
2280int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2281{
2282 return -EOPNOTSUPP;
2283}
Eric Dumazet2a915252009-05-27 11:30:05 +00002284EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
2286int sock_no_listen(struct socket *sock, int backlog)
2287{
2288 return -EOPNOTSUPP;
2289}
Eric Dumazet2a915252009-05-27 11:30:05 +00002290EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291
2292int sock_no_shutdown(struct socket *sock, int how)
2293{
2294 return -EOPNOTSUPP;
2295}
Eric Dumazet2a915252009-05-27 11:30:05 +00002296EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
2298int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002299 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300{
2301 return -EOPNOTSUPP;
2302}
Eric Dumazet2a915252009-05-27 11:30:05 +00002303EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304
2305int sock_no_getsockopt(struct socket *sock, int level, int optname,
2306 char __user *optval, int __user *optlen)
2307{
2308 return -EOPNOTSUPP;
2309}
Eric Dumazet2a915252009-05-27 11:30:05 +00002310EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311
Ying Xue1b784142015-03-02 15:37:48 +08002312int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313{
2314 return -EOPNOTSUPP;
2315}
Eric Dumazet2a915252009-05-27 11:30:05 +00002316EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
Ying Xue1b784142015-03-02 15:37:48 +08002318int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2319 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320{
2321 return -EOPNOTSUPP;
2322}
Eric Dumazet2a915252009-05-27 11:30:05 +00002323EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324
2325int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2326{
2327 /* Mirror missing mmap method error code */
2328 return -ENODEV;
2329}
Eric Dumazet2a915252009-05-27 11:30:05 +00002330EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
2332ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2333{
2334 ssize_t res;
2335 struct msghdr msg = {.msg_flags = flags};
2336 struct kvec iov;
2337 char *kaddr = kmap(page);
2338 iov.iov_base = kaddr + offset;
2339 iov.iov_len = size;
2340 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2341 kunmap(page);
2342 return res;
2343}
Eric Dumazet2a915252009-05-27 11:30:05 +00002344EXPORT_SYMBOL(sock_no_sendpage);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
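/*
 * Illustrative sketch (not from this file): a minimal datagram-only family
 * can wire the sock_no_*() stubs above into its proto_ops so every
 * unsupported call fails consistently. example_release() and the PF_UNSPEC
 * family value are placeholders; a real implementation supplies its own
 * sendmsg/recvmsg handlers instead of the two stubs noted below.
 */
static int example_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock->sk = NULL;
		sock_put(sk);
	}
	return 0;
}

static const struct proto_ops example_proto_ops = {
	.family		= PF_UNSPEC,		/* placeholder */
	.owner		= THIS_MODULE,
	.release	= example_release,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= datagram_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= sock_no_sendmsg,	/* a real family overrides these two */
	.recvmsg	= sock_no_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};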
2346/*
2347 * Default Socket Callbacks
2348 */
2349
2350static void sock_def_wakeup(struct sock *sk)
2351{
Eric Dumazet43815482010-04-29 11:01:49 +00002352 struct socket_wq *wq;
2353
2354 rcu_read_lock();
2355 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002356 if (skwq_has_sleeper(wq))
Eric Dumazet43815482010-04-29 11:01:49 +00002357 wake_up_interruptible_all(&wq->wait);
2358 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359}
2360
2361static void sock_def_error_report(struct sock *sk)
2362{
Eric Dumazet43815482010-04-29 11:01:49 +00002363 struct socket_wq *wq;
2364
2365 rcu_read_lock();
2366 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002367 if (skwq_has_sleeper(wq))
Eric Dumazet43815482010-04-29 11:01:49 +00002368 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002369 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002370 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371}
2372
David S. Miller676d2362014-04-11 16:15:36 -04002373static void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374{
Eric Dumazet43815482010-04-29 11:01:49 +00002375 struct socket_wq *wq;
2376
2377 rcu_read_lock();
2378 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002379 if (skwq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002380 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002381 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002382 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002383 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384}
2385
2386static void sock_def_write_space(struct sock *sk)
2387{
Eric Dumazet43815482010-04-29 11:01:49 +00002388 struct socket_wq *wq;
2389
2390 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391
2392 /* Do not wake up a writer until he can make "significant"
2393 * progress. --DaveM
2394 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002395 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002396 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002397 if (skwq_has_sleeper(wq))
Eric Dumazet43815482010-04-29 11:01:49 +00002398 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002399 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
2401 /* Should agree with poll, otherwise some programs break */
2402 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002403 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 }
2405
Eric Dumazet43815482010-04-29 11:01:49 +00002406 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407}
2408
2409static void sock_def_destruct(struct sock *sk)
2410{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411}
2412
2413void sk_send_sigurg(struct sock *sk)
2414{
2415 if (sk->sk_socket && sk->sk_socket->file)
2416 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002417 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418}
Eric Dumazet2a915252009-05-27 11:30:05 +00002419EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
2421void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2422 unsigned long expires)
2423{
2424 if (!mod_timer(timer, expires))
2425 sock_hold(sk);
2426}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427EXPORT_SYMBOL(sk_reset_timer);
2428
2429void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2430{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002431 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 __sock_put(sk);
2433}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434EXPORT_SYMBOL(sk_stop_timer);
2435
2436void sock_init_data(struct socket *sock, struct sock *sk)
2437{
2438 skb_queue_head_init(&sk->sk_receive_queue);
2439 skb_queue_head_init(&sk->sk_write_queue);
2440 skb_queue_head_init(&sk->sk_error_queue);
2441
2442 sk->sk_send_head = NULL;
2443
2444 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002445
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 sk->sk_allocation = GFP_KERNEL;
2447 sk->sk_rcvbuf = sysctl_rmem_default;
2448 sk->sk_sndbuf = sysctl_wmem_default;
2449 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002450 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451
2452 sock_set_flag(sk, SOCK_ZAPPED);
2453
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002454 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002456 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 sock->sk = sk;
Lorenzo Colitti81a15912016-11-04 02:23:41 +09002458 sk->sk_uid = SOCK_INODE(sock)->i_uid;
2459 } else {
Eric Dumazet43815482010-04-29 11:01:49 +00002460 sk->sk_wq = NULL;
Lorenzo Colitti81a15912016-11-04 02:23:41 +09002461 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
2462 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002465 lockdep_set_class_and_name(&sk->sk_callback_lock,
2466 af_callback_keys + sk->sk_family,
2467 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
2469 sk->sk_state_change = sock_def_wakeup;
2470 sk->sk_data_ready = sock_def_readable;
2471 sk->sk_write_space = sock_def_write_space;
2472 sk->sk_error_report = sock_def_error_report;
2473 sk->sk_destruct = sock_def_destruct;
2474
Eric Dumazet5640f762012-09-23 23:04:42 +00002475 sk->sk_frag.page = NULL;
2476 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002477 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002479 sk->sk_peer_pid = NULL;
2480 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 sk->sk_write_pending = 0;
2482 sk->sk_rcvlowat = 1;
2483 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2484 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2485
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002486 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487
Cong Wange0d10952013-08-01 11:10:25 +08002488#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03002489 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002490 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002491#endif
2492
Eric Dumazet62748f32013-09-24 08:20:52 -07002493 sk->sk_max_pacing_rate = ~0U;
Eric Dumazet7eec4172013-10-08 15:16:00 -07002494 sk->sk_pacing_rate = ~0U;
Eric Dumazet70da2682015-10-08 19:33:21 -07002495 sk->sk_incoming_cpu = -1;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002496 /*
2497 * Before updating sk_refcnt, we must commit prior changes to memory
2498 * (Documentation/RCU/rculist_nulls.txt for details)
2499 */
2500 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002502 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503}
Eric Dumazet2a915252009-05-27 11:30:05 +00002504EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002506void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507{
2508 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002509 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002510 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002512 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002513 spin_unlock(&sk->sk_lock.slock);
2514 /*
2515 * The sk_lock has mutex_lock() semantics here:
2516 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002517 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002518 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002520EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002522void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002524 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 if (sk->sk_backlog.tail)
2526 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002527
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002528	/* Warning: release_cb() might need to release sk ownership,
2529	 * i.e. call sock_release_ownership(sk) before us.
2530 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002531 if (sk->sk_prot->release_cb)
2532 sk->sk_prot->release_cb(sk);
2533
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002534 sock_release_ownership(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002535 if (waitqueue_active(&sk->sk_lock.wq))
2536 wake_up(&sk->sk_lock.wq);
2537 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538}
2539EXPORT_SYMBOL(release_sock);
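
/*
 * Illustrative sketch (hypothetical, for exposition): process-context code
 * takes ownership with lock_sock() -- a wrapper around
 * lock_sock_nested(sk, 0) -- mutates socket state, and release_sock() then
 * runs the backlog and wakes any lock waiters.
 */
static void example_set_rcvlowat(struct sock *sk, int val)
{
	lock_sock(sk);			/* may sleep */
	sk->sk_rcvlowat = val ? : 1;
	release_sock(sk);
}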
2540
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002541/**
2542 * lock_sock_fast - fast version of lock_sock
2543 * @sk: socket
2544 *
2545 * This version should be used for very small sections, where the process won't block.
2546 * It returns false if the fast path is taken:
2547 *   sk_lock.slock locked, owned = 0, BH disabled
2548 * It returns true if the slow path is taken:
2549 *   sk_lock.slock unlocked, owned = 1, BH enabled
2550 */
2551bool lock_sock_fast(struct sock *sk)
2552{
2553 might_sleep();
2554 spin_lock_bh(&sk->sk_lock.slock);
2555
2556 if (!sk->sk_lock.owned)
2557 /*
2558		 * Note: fast path, we return with BH still disabled
2559 */
2560 return false;
2561
2562 __lock_sock(sk);
2563 sk->sk_lock.owned = 1;
2564 spin_unlock(&sk->sk_lock.slock);
2565 /*
2566 * The sk_lock has mutex_lock() semantics here:
2567 */
2568 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2569 local_bh_enable();
2570 return true;
2571}
2572EXPORT_SYMBOL(lock_sock_fast);
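
/*
 * Illustrative sketch (hypothetical, for exposition): the returned "slow"
 * flag is handed back to unlock_sock_fast() (include/net/sock.h), which
 * either calls release_sock() or simply re-enables BH.  The critical
 * section must stay short and non-blocking.
 */
static void example_purge_queue(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	unlock_sock_fast(sk, slow);
}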
2573
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002575{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002576 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002578 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002579 tv = ktime_to_timeval(sk->sk_stamp);
2580 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002582 if (tv.tv_sec == 0) {
2583 sk->sk_stamp = ktime_get_real();
2584 tv = ktime_to_timeval(sk->sk_stamp);
2585 }
2586 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002587}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588EXPORT_SYMBOL(sock_get_timestamp);
2589
Eric Dumazetae40eb12007-03-18 17:33:16 -07002590int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2591{
2592 struct timespec ts;
2593 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002594 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002595 ts = ktime_to_timespec(sk->sk_stamp);
2596 if (ts.tv_sec == -1)
2597 return -ENOENT;
2598 if (ts.tv_sec == 0) {
2599 sk->sk_stamp = ktime_get_real();
2600 ts = ktime_to_timespec(sk->sk_stamp);
2601 }
2602 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2603}
2604EXPORT_SYMBOL(sock_get_timestampns);
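
/*
 * Illustrative userspace sketch (hypothetical): the two helpers above are
 * typically reached through a protocol's ioctl handler servicing
 * SIOCGSTAMP / SIOCGSTAMPNS; -ENOENT means no packet has been timestamped
 * yet.
 */
#include <stdio.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void print_last_rx_time(int fd)
{
	struct timeval tv;

	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
		printf("last packet: %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
}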
2605
Patrick Ohly20d49472009-02-12 05:03:38 +00002606void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002607{
Patrick Ohly20d49472009-02-12 05:03:38 +00002608 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002609 unsigned long previous_flags = sk->sk_flags;
2610
Patrick Ohly20d49472009-02-12 05:03:38 +00002611 sock_set_flag(sk, flag);
2612 /*
2613	 * We just set one of the two flags which require network
2614	 * time stamping, but time stamping might have been enabled
2615	 * already because of the other one.
2616 */
Hannes Frederic Sowa080a2702015-10-26 13:51:37 +01002617 if (sock_needs_netstamp(sk) &&
2618 !(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002619 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 }
2621}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622
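
/*
 * Illustrative userspace sketch (hypothetical): SO_TIMESTAMP handling in
 * sock_setsockopt() is one caller of sock_enable_timestamp(); once enabled,
 * each recvmsg() carries an SCM_TIMESTAMP control message with the receive
 * time.
 */
#include <sys/socket.h>

static int enable_rx_timestamps(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &one, sizeof(one));
}
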
Richard Cochrancb820f82013-07-19 19:40:09 +02002623int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2624 int level, int type)
2625{
2626 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002627 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02002628 int copied, err;
2629
2630 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002631 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02002632 if (skb == NULL)
2633 goto out;
2634
2635 copied = skb->len;
2636 if (copied > len) {
2637 msg->msg_flags |= MSG_TRUNC;
2638 copied = len;
2639 }
David S. Miller51f3d022014-11-05 16:46:40 -05002640 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02002641 if (err)
2642 goto out_free_skb;
2643
2644 sock_recv_timestamp(msg, sk, skb);
2645
2646 serr = SKB_EXT_ERR(skb);
2647 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2648
2649 msg->msg_flags |= MSG_ERRQUEUE;
2650 err = copied;
2651
Richard Cochrancb820f82013-07-19 19:40:09 +02002652out_free_skb:
2653 kfree_skb(skb);
2654out:
2655 return err;
2656}
2657EXPORT_SYMBOL(sock_recv_errqueue);
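
/*
 * Illustrative userspace sketch (hypothetical): queues drained by
 * sock_recv_errqueue() are read with recvmsg(MSG_ERRQUEUE); the
 * struct sock_extended_err arrives in the control message put_cmsg()'d
 * above (level and type depend on the address family).
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/errqueue.h>

static void drain_errqueue(int fd)
{
	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= ctrl,
		.msg_controllen	= sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct sock_extended_err ee;

		memcpy(&ee, CMSG_DATA(cm), sizeof(ee));
		/* inspect ee.ee_errno, ee.ee_origin, ee.ee_info, ... */
	}
}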
2658
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659/*
2660 * Get a socket option on a socket.
2661 *
2662 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2663 * asynchronous errors should be reported by getsockopt. We assume
2664 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2665 */
2666int sock_common_getsockopt(struct socket *sock, int level, int optname,
2667 char __user *optval, int __user *optlen)
2668{
2669 struct sock *sk = sock->sk;
2670
2671 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2672}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673EXPORT_SYMBOL(sock_common_getsockopt);
2674
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002675#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002676int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2677 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002678{
2679 struct sock *sk = sock->sk;
2680
Johannes Berg1e51f952007-03-06 13:44:06 -08002681 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002682 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2683 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002684 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2685}
2686EXPORT_SYMBOL(compat_sock_common_getsockopt);
2687#endif
2688
Ying Xue1b784142015-03-02 15:37:48 +08002689int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2690 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691{
2692 struct sock *sk = sock->sk;
2693 int addr_len = 0;
2694 int err;
2695
Ying Xue1b784142015-03-02 15:37:48 +08002696 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 flags & ~MSG_DONTWAIT, &addr_len);
2698 if (err >= 0)
2699 msg->msg_namelen = addr_len;
2700 return err;
2701}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702EXPORT_SYMBOL(sock_common_recvmsg);
2703
2704/*
2705 * Set socket options on a socket.
2706 */
2707int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002708 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709{
2710 struct sock *sk = sock->sk;
2711
2712 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2713}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714EXPORT_SYMBOL(sock_common_setsockopt);
2715
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002716#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002717int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002718 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002719{
2720 struct sock *sk = sock->sk;
2721
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002722 if (sk->sk_prot->compat_setsockopt != NULL)
2723 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2724 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002725 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2726}
2727EXPORT_SYMBOL(compat_sock_common_setsockopt);
2728#endif
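
/*
 * Illustrative sketch (hypothetical, for exposition): families whose option
 * handling and recvmsg live entirely in sk->sk_prot can wire the generic
 * wrappers above straight into their struct proto_ops.  PF_EXAMPLE and the
 * omitted handlers are placeholders.
 */
static const struct proto_ops example_proto_ops = {
	.family		= PF_EXAMPLE,
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.recvmsg	= sock_common_recvmsg,
	/* release, bind, connect, ... omitted */
};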
2729
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730void sk_common_release(struct sock *sk)
2731{
2732 if (sk->sk_prot->destroy)
2733 sk->sk_prot->destroy(sk);
2734
2735 /*
2736	 * Observation: when sk_common_release() is called, processes no longer
2737	 * have access to the socket, but the network stack still does.
2738 * Step one, detach it from networking:
2739 *
2740 * A. Remove from hash tables.
2741 */
2742
2743 sk->sk_prot->unhash(sk);
2744
2745 /*
2746	 * At this point the socket cannot receive new packets, but some may
2747	 * still be in flight because another CPU ran the receiver and did its
2748	 * hash table lookup before we unhashed the socket. They will reach the
2749	 * receive queue and be purged by the socket destructor.
2750	 *
2751	 * Also, we still have packets pending on the receive queue and probably
2752	 * our own packets waiting in device queues. sock_destroy will drain the
2753	 * receive queue, but transmitted packets will delay socket destruction
2754	 * until the last reference is released.
2755 */
2756
2757 sock_orphan(sk);
2758
2759 xfrm_sk_free_policy(sk);
2760
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002761 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00002762
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 sock_put(sk);
2764}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765EXPORT_SYMBOL(sk_common_release);
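
/*
 * Illustrative sketch (hypothetical, for exposition): connection-less
 * protocols commonly call sk_common_release() straight from their ->close()
 * handler once protocol-private teardown is done.
 */
static void example_close(struct sock *sk, long timeout)
{
	/* protocol-private teardown would go here */
	sk_common_release(sk);	/* destroy, unhash, orphan, drop the ref */
}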
2766
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002767#ifdef CONFIG_PROC_FS
2768#define PROTO_INUSE_NR	64		/* should be enough for now */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002769struct prot_inuse {
2770 int val[PROTO_INUSE_NR];
2771};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002772
2773static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002774
2775#ifdef CONFIG_NET_NS
2776void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2777{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002778 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002779}
2780EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2781
2782int sock_prot_inuse_get(struct net *net, struct proto *prot)
2783{
2784 int cpu, idx = prot->inuse_idx;
2785 int res = 0;
2786
2787 for_each_possible_cpu(cpu)
2788 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2789
2790 return res >= 0 ? res : 0;
2791}
2792EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2793
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002794static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002795{
2796 net->core.inuse = alloc_percpu(struct prot_inuse);
2797 return net->core.inuse ? 0 : -ENOMEM;
2798}
2799
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002800static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002801{
2802 free_percpu(net->core.inuse);
2803}
2804
2805static struct pernet_operations net_inuse_ops = {
2806 .init = sock_inuse_init_net,
2807 .exit = sock_inuse_exit_net,
2808};
2809
2810static __init int net_inuse_init(void)
2811{
2812 if (register_pernet_subsys(&net_inuse_ops))
2813 panic("Cannot initialize net inuse counters");
2814
2815 return 0;
2816}
2817
2818core_initcall(net_inuse_init);
2819#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002820static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2821
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002822void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002823{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002824 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002825}
2826EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2827
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002828int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002829{
2830 int cpu, idx = prot->inuse_idx;
2831 int res = 0;
2832
2833 for_each_possible_cpu(cpu)
2834 res += per_cpu(prot_inuse, cpu).val[idx];
2835
2836 return res >= 0 ? res : 0;
2837}
2838EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002839#endif
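
/*
 * Illustrative sketch (hypothetical, for exposition): protocols increment
 * the per-cpu counter when a socket is hashed and decrement it on unhash,
 * so sock_prot_inuse_get() -- and the "sockets" column of
 * /proc/net/protocols below -- reflects the number of sockets in use.
 */
static void example_hash(struct sock *sk)
{
	/* ... insert sk into the protocol's lookup tables ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* ... remove sk from the lookup tables ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}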
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002840
2841static void assign_proto_idx(struct proto *prot)
2842{
2843 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2844
2845 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002846 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002847 return;
2848 }
2849
2850 set_bit(prot->inuse_idx, proto_inuse_idx);
2851}
2852
2853static void release_proto_idx(struct proto *prot)
2854{
2855 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2856 clear_bit(prot->inuse_idx, proto_inuse_idx);
2857}
2858#else
2859static inline void assign_proto_idx(struct proto *prot)
2860{
2861}
2862
2863static inline void release_proto_idx(struct proto *prot)
2864{
2865}
2866#endif
2867
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002868static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2869{
2870 if (!rsk_prot)
2871 return;
2872 kfree(rsk_prot->slab_name);
2873 rsk_prot->slab_name = NULL;
Julia Lawalladf78ed2015-09-13 14:15:18 +02002874 kmem_cache_destroy(rsk_prot->slab);
2875 rsk_prot->slab = NULL;
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002876}
2877
2878static int req_prot_init(const struct proto *prot)
2879{
2880 struct request_sock_ops *rsk_prot = prot->rsk_prot;
2881
2882 if (!rsk_prot)
2883 return 0;
2884
2885 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2886 prot->name);
2887 if (!rsk_prot->slab_name)
2888 return -ENOMEM;
2889
2890 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2891 rsk_prot->obj_size, 0,
Eric Dumazete96f78a2015-10-03 06:27:28 -07002892 prot->slab_flags, NULL);
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002893
2894 if (!rsk_prot->slab) {
2895 pr_crit("%s: Can't create request sock SLAB cache!\n",
2896 prot->name);
2897 return -ENOMEM;
2898 }
2899 return 0;
2900}
2901
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902int proto_register(struct proto *prot, int alloc_slab)
2903{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 if (alloc_slab) {
2905 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002906 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2907 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908
2909 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002910 pr_crit("%s: Can't create sock SLAB cache!\n",
2911 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002912 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002914
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002915 if (req_prot_init(prot))
2916 goto out_free_request_sock_slab;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002917
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002918 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002919 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002920
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002921 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002922 goto out_free_request_sock_slab;
2923
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002924 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002925 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002926 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002927 0,
Eric Dumazet52db70d2015-04-10 06:07:18 -07002928 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002929 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002930 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002931 goto out_free_timewait_sock_slab_name;
2932 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 }
2934
Glauber Costa36b77a52011-12-16 00:51:59 +00002935 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002937 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002938 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002939 return 0;
2940
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002941out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002942 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002943out_free_request_sock_slab:
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002944 req_prot_cleanup(prot->rsk_prot);
2945
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002946 kmem_cache_destroy(prot->slab);
2947 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002948out:
2949 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951EXPORT_SYMBOL(proto_register);
2952
2953void proto_unregister(struct proto *prot)
2954{
Glauber Costa36b77a52011-12-16 00:51:59 +00002955 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002956 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002957 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002958 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959
Julia Lawalladf78ed2015-09-13 14:15:18 +02002960 kmem_cache_destroy(prot->slab);
2961 prot->slab = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002963 req_prot_cleanup(prot->rsk_prot);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002964
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002965 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002966 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002967 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002968 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002969 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971EXPORT_SYMBOL(proto_unregister);
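
/*
 * Illustrative sketch (hypothetical, for exposition): a protocol module
 * registers its struct proto at init time (alloc_slab = 1 requests a
 * dedicated kmem cache sized by ->obj_size) and unregisters it on exit.
 * The "example" names are placeholders.
 */
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),	/* normally sizeof(struct example_sock) */
};

static int __init example_proto_init(void)
{
	return proto_register(&example_proto, 1);
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_proto);
}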
2972
2973#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002975 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976{
Glauber Costa36b77a52011-12-16 00:51:59 +00002977 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002978 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979}
2980
2981static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2982{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002983 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984}
2985
2986static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002987 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988{
Glauber Costa36b77a52011-12-16 00:51:59 +00002989 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990}
2991
2992static char proto_method_implemented(const void *method)
2993{
2994 return method == NULL ? 'n' : 'y';
2995}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002996static long sock_prot_memory_allocated(struct proto *proto)
2997{
Jeffrin Josecb75a362012-04-25 19:17:29 +05302998 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002999}
3000
3001static char *sock_prot_memory_pressure(struct proto *proto)
3002{
3003 return proto->memory_pressure != NULL ?
3004 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3005}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006
3007static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3008{
Glauber Costa180d8cd2011-12-11 21:47:02 +00003009
Eric Dumazet8d987e52010-11-09 23:24:26 +00003010 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3012 proto->name,
3013 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08003014 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00003015 sock_prot_memory_allocated(proto),
3016 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017 proto->max_header,
3018 proto->slab == NULL ? "no" : "yes",
3019 module_name(proto->owner),
3020 proto_method_implemented(proto->close),
3021 proto_method_implemented(proto->connect),
3022 proto_method_implemented(proto->disconnect),
3023 proto_method_implemented(proto->accept),
3024 proto_method_implemented(proto->ioctl),
3025 proto_method_implemented(proto->init),
3026 proto_method_implemented(proto->destroy),
3027 proto_method_implemented(proto->shutdown),
3028 proto_method_implemented(proto->setsockopt),
3029 proto_method_implemented(proto->getsockopt),
3030 proto_method_implemented(proto->sendmsg),
3031 proto_method_implemented(proto->recvmsg),
3032 proto_method_implemented(proto->sendpage),
3033 proto_method_implemented(proto->bind),
3034 proto_method_implemented(proto->backlog_rcv),
3035 proto_method_implemented(proto->hash),
3036 proto_method_implemented(proto->unhash),
3037 proto_method_implemented(proto->get_port),
3038 proto_method_implemented(proto->enter_memory_pressure));
3039}
3040
3041static int proto_seq_show(struct seq_file *seq, void *v)
3042{
Pavel Emelianov60f04382007-07-09 13:15:14 -07003043 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3045 "protocol",
3046 "size",
3047 "sockets",
3048 "memory",
3049 "press",
3050 "maxhdr",
3051 "slab",
3052 "module",
3053 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3054 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07003055 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 return 0;
3057}
3058
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003059static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 .start = proto_seq_start,
3061 .next = proto_seq_next,
3062 .stop = proto_seq_stop,
3063 .show = proto_seq_show,
3064};
3065
3066static int proto_seq_open(struct inode *inode, struct file *file)
3067{
Eric Dumazet14e943d2008-11-19 15:14:01 -08003068 return seq_open_net(inode, file, &proto_seq_ops,
3069 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070}
3071
Arjan van de Ven9a321442007-02-12 00:55:35 -08003072static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 .owner = THIS_MODULE,
3074 .open = proto_seq_open,
3075 .read = seq_read,
3076 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08003077 .release = seq_release_net,
3078};
3079
3080static __net_init int proto_init_net(struct net *net)
3081{
Gao fengd4beaa62013-02-18 01:34:54 +00003082 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
Eric Dumazet14e943d2008-11-19 15:14:01 -08003083 return -ENOMEM;
3084
3085 return 0;
3086}
3087
3088static __net_exit void proto_exit_net(struct net *net)
3089{
Gao fengece31ff2013-02-18 01:34:56 +00003090 remove_proc_entry("protocols", net->proc_net);
Eric Dumazet14e943d2008-11-19 15:14:01 -08003091}
3092
3093
3094static __net_initdata struct pernet_operations proto_net_ops = {
3095 .init = proto_init_net,
3096 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097};
3098
3099static int __init proto_init(void)
3100{
Eric Dumazet14e943d2008-11-19 15:14:01 -08003101 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102}
3103
3104subsys_initcall(proto_init);
3105
3106#endif /* PROC_FS */