/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and that the
 * current process has it as well.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in all
 * user namespaces when the socket was created, and that the current
 * process has it as well.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap over
 * the network namespace the socket is a member of when the socket was
 * created, and that the current process has it as well.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
  "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_KCM"      ,
  "slock-AF_QIPCRTR", "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
  "clock-AF_QIPCRTR", "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

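/*
 * Worked example (illustrative; the exact numbers depend on architecture
 * and config): SKB_TRUESIZE(256) is 256 bytes of payload plus the
 * cache-line-aligned sizes of struct sk_buff and struct skb_shared_info.
 * On a typical 64-bit build where those align to roughly 256 and 320
 * bytes, one packet accounts for about 832 bytes, so the defaults come
 * out near 832 * 256 ~= 208 KiB rather than a bare 256 * 256 = 64 KiB.
 */
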
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

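/*
 * Illustrative sketch (assumption: simplified, not from this file): a
 * storage transport that backs swap, e.g. a network block device, would
 * mark its socket so that reclaim-critical traffic can dip into the
 * emergency reserves, and unmark it when swap is torn down.
 */
#if 0
static void hypothetical_swap_transport_enable(struct sock *sk)
{
	sk_set_memalloc(sk);	/* sets SOCK_MEMALLOC, adds __GFP_MEMALLOC */
}

static void hypothetical_swap_transport_disable(struct sock *sk)
{
	sk_clear_memalloc(sk);	/* also reclaims rmem via sk_mem_reclaim() */
}
#endif
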
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

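/*
 * Worked example (illustrative): with HZ == 1000, a request of
 * tv_sec = 2, tv_usec = 500 becomes 2 * 1000 + (500 + 999) / 1000
 * = 2001 jiffies; the sub-tick remainder rounds up, so a non-zero
 * timeout can never truncate to zero.
 */
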
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

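/*
 * Illustrative sketch (assumption: simplified, not from this file): a
 * datagram protocol's receive path typically ends by handing the skb to
 * the generic queueing helper above; on error the caller still owns the
 * skb and must free it.
 */
#if 0
static int hypothetical_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = sock_queue_rcv_skb(sk, skb);	/* runs sk_filter() first */

	if (rc < 0) {
		kfree_skb(skb);			/* queueing failed or filtered */
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}
#endif
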
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

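/*
 * Illustrative sketch (assumption: simplified, not from this file): an
 * output path revalidates its cached route before use; a NULL return
 * means the dst went stale and must be looked up again. The lookup
 * helper here is hypothetical.
 */
#if 0
static struct dst_entry *hypothetical_output_route(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_check(sk, cookie);

	if (!dst)
		dst = hypothetical_route_lookup(sk);	/* refresh the cache */
	return dst;
}
#endif
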
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

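/*
 * Illustrative userspace sketch (assumption: not part of the kernel):
 * this is the setsockopt() call the handler above services. It requires
 * CAP_NET_RAW in the socket's network namespace.
 */
#if 0
static void example_bind_to_eth0(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	const char ifname[] = "eth0";

	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, sizeof(ifname)) < 0)
		perror("SO_BINDTODEVICE");
}
#endif
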
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

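/*
 * Illustrative sketch (assumption: heavily simplified from the IP output
 * path, not from this file): multicast transmit consults sk_mc_loop() to
 * decide whether a copy of the outgoing packet should also be delivered
 * back to local listeners. The delivery helper is hypothetical.
 */
#if 0
	if ((rt->rt_flags & RTCF_MULTICAST) && sk_mc_loop(sk))
		deliver_local_copy(skb);	/* hypothetical loopback step */
#endif
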
/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);

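/*
 * Illustrative userspace sketch (assumption: not part of the kernel):
 * because SO_RCVBUF is doubled on the way in to cover sk_buff overhead,
 * reading the option back returns twice the requested value, subject to
 * sysctl_rmem_max and SOCK_MIN_RCVBUF.
 */
#if 0
static void example_rcvbuf_doubling(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int req = 65536, got = 0;
	socklen_t len = sizeof(got);

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
	/* got is typically 131072 here, i.e. 2 * req */
}
#endif
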
static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc. to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001374/**
1375 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001376 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001377 * @family: protocol family
1378 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1379 * @prot: struct proto associated with this new sock instance
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001380 * @kern: is this to be a kernel socket?
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001382struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Eric W. Biederman11aa9c22015-05-08 21:09:13 -05001383 struct proto *prot, int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001385 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001387 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001389 sk->sk_family = family;
1390 /*
1391 * See comment in struct sock definition to understand
1392 * why we need sk_prot_creator -acme
1393 */
1394 sk->sk_prot = sk->sk_prot_creator = prot;
1395 sock_lock_init(sk);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001396 sk->sk_net_refcnt = kern ? 0 : 1;
1397 if (likely(sk->sk_net_refcnt))
1398 get_net(net);
1399 sock_net_set(sk, net);
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001400 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001401
Johannes Weiner2d758072016-10-07 17:00:58 -07001402 mem_cgroup_sk_alloc(sk);
Johannes Weinerd979a392016-09-19 14:44:38 -07001403 cgroup_sk_alloc(&sk->sk_cgrp_data);
Tejun Heo2a56a1f2015-12-07 17:38:52 -05001404 sock_update_classid(&sk->sk_cgrp_data);
1405 sock_update_netprioidx(&sk->sk_cgrp_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 }
Frank Filza79af592005-09-27 15:23:38 -07001407
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001408 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409}
Eric Dumazet2a915252009-05-27 11:30:05 +00001410EXPORT_SYMBOL(sk_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411
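/*
 * Example (illustrative sketch, not part of this file): a protocol
 * family's ->create() handler typically pairs sk_alloc() with
 * sock_init_data() further below. "my_proto" is a hypothetical
 * struct proto standing in for e.g. tcp_prot or udp_prot:
 *
 *	static int my_create(struct net *net, struct socket *sock,
 *			     int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */
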
Eric Dumazeta4298e42016-04-01 08:52:12 -07001412/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1413 * grace period. This is the case for UDP sockets and TCP listeners.
1414 */
1415static void __sk_destruct(struct rcu_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416{
Eric Dumazeta4298e42016-04-01 08:52:12 -07001417 struct sock *sk = container_of(head, struct sock, sk_rcu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
1420 if (sk->sk_destruct)
1421 sk->sk_destruct(sk);
1422
Paul E. McKenneya898def2010-02-22 17:04:49 -08001423 filter = rcu_dereference_check(sk->sk_filter,
1424 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001426 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001427 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 }
1429
Eric Dumazet08e29af2011-11-28 12:04:18 +00001430 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431
1432 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001433 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1434 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435
Eric Dumazete9c1b1a2017-03-15 13:21:28 -07001436 if (sk->sk_frag.page) {
1437 put_page(sk->sk_frag.page);
1438 sk->sk_frag.page = NULL;
1439 }
1440
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001441 if (sk->sk_peer_cred)
1442 put_cred(sk->sk_peer_cred);
1443 put_pid(sk->sk_peer_pid);
Eric W. Biederman26abe142015-05-08 21:10:31 -05001444 if (likely(sk->sk_net_refcnt))
1445 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001446 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001448
Eric Dumazeta4298e42016-04-01 08:52:12 -07001449void sk_destruct(struct sock *sk)
1450{
Martin KaFai Lau62241d62019-09-27 16:00:31 -07001451 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
1452
1453 if (rcu_access_pointer(sk->sk_reuseport_cb)) {
1454 reuseport_detach_sock(sk);
1455 use_call_rcu = true;
1456 }
1457
1458 if (use_call_rcu)
Eric Dumazeta4298e42016-04-01 08:52:12 -07001459 call_rcu(&sk->sk_rcu, __sk_destruct);
1460 else
1461 __sk_destruct(&sk->sk_rcu);
1462}
1463
Craig Gallekeb4cb002015-06-15 11:26:18 -04001464static void __sk_free(struct sock *sk)
1465{
Eric Dumazeta5e907c2018-05-18 04:47:55 -07001466 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
Craig Gallekeb4cb002015-06-15 11:26:18 -04001467 sock_diag_broadcast_destroy(sk);
1468 else
1469 sk_destruct(sk);
1470}
1471
Eric Dumazet2b85a342009-06-11 02:55:43 -07001472void sk_free(struct sock *sk)
1473{
1474 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001475	 * We subtract one from sk_wmem_alloc so we can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001476	 * some packets are still in some tx queue.
1477	 * If the count is not yet zero, sock_wfree() will call __sk_free(sk) later.
1478 */
1479 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1480 __sk_free(sk);
1481}
Eric Dumazet2a915252009-05-27 11:30:05 +00001482EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Eric Dumazete56c57d2011-11-08 17:07:07 -05001484/**
1485 * sk_clone_lock - clone a socket, and lock its clone
1486 * @sk: the socket to clone
1487 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1488 *
1489 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1490 */
1491struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001492{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001493 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001494 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001495
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001496 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001497 if (newsk != NULL) {
1498 struct sk_filter *filter;
1499
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001500 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001501
Christoph Paaschcf2eaf12017-09-26 17:38:50 -07001502 newsk->sk_prot_creator = sk->sk_prot;
1503
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001504 /* SANITY */
Sowmini Varadhan8a681732015-07-30 15:50:36 +02001505 if (likely(newsk->sk_net_refcnt))
1506 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001507 sk_node_init(&newsk->sk_node);
1508 sock_lock_init(newsk);
1509 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001510 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001511 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001512
1513 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001514 /*
1515 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1516 */
1517 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001518 atomic_set(&newsk->sk_omem_alloc, 0);
1519 skb_queue_head_init(&newsk->sk_receive_queue);
1520 skb_queue_head_init(&newsk->sk_write_queue);
1521
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001522 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001523 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1524 af_callback_keys + newsk->sk_family,
1525 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001526
1527 newsk->sk_dst_cache = NULL;
1528 newsk->sk_wmem_queued = 0;
1529 newsk->sk_forward_alloc = 0;
Eric Dumazet9caad862016-04-01 08:52:20 -07001530 atomic_set(&newsk->sk_drops, 0);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001531 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001532 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1533
1534 sock_reset_flag(newsk, SOCK_DONE);
Eric Dumazetcb5880e2017-10-10 19:12:33 -07001535 cgroup_sk_alloc(&newsk->sk_cgrp_data);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001536 skb_queue_head_init(&newsk->sk_error_queue);
1537
Greg Kroah-Hartman00449622017-10-12 21:21:39 +02001538 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001539 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001540 /* though it's an empty new sock, the charging may fail
1541			 * if sysctl_optmem_max was changed between creation of the
1542			 * original socket and the clone.
1543 */
1544 is_charged = sk_filter_charge(newsk, filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001545
Eric Dumazetd188ba82015-12-08 07:22:02 -08001546 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
Daniel Borkmanna53ea602017-03-22 13:08:08 +01001547 /* We need to make sure that we don't uncharge the new
1548 * socket if we couldn't charge it in the first place
1549 * as otherwise we uncharge the parent's filter.
1550 */
1551 if (!is_charged)
1552 RCU_INIT_POINTER(newsk->sk_filter, NULL);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001553			/* It is still a raw copy of the parent, so invalidate
1554			 * the destructor and do a plain sk_free(). */
1555 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001556 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001557 sk_free(newsk);
1558 newsk = NULL;
1559 goto out;
1560 }
Craig Gallekfa463492016-02-10 11:50:39 -05001561 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001562
1563 newsk->sk_err = 0;
Eric Dumazete551c322016-10-28 13:40:24 -07001564 newsk->sk_err_soft = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001565 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001566 newsk->sk_incoming_cpu = raw_smp_processor_id();
Eric Dumazet33cf7c92015-03-11 18:53:14 -07001567 atomic64_set(&newsk->sk_cookie, 0);
Johannes Weinerd979a392016-09-19 14:44:38 -07001568
Johannes Weiner2d758072016-10-07 17:00:58 -07001569 mem_cgroup_sk_alloc(newsk);
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001570 /*
1571 * Before updating sk_refcnt, we must commit prior changes to memory
1572 * (Documentation/RCU/rculist_nulls.txt for details)
1573 */
1574 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001575 atomic_set(&newsk->sk_refcnt, 2);
1576
1577 /*
1578 * Increment the counter in the same struct proto as the master
1579 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1580 * is the same as sk->sk_prot->socks, as this field was copied
1581 * with memcpy).
1582 *
1583 * This _changes_ the previous behaviour, where
1584 * tcp_create_openreq_child always was incrementing the
1585		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1586 * to be taken into account in all callers. -acme
1587 */
1588 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001589 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001590 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001591
1592 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001593 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001594
Hannes Frederic Sowa080a2702015-10-26 13:51:37 +01001595 if (sock_needs_netstamp(sk) &&
1596 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001597 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001598 }
1599out:
1600 return newsk;
1601}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001602EXPORT_SYMBOL_GPL(sk_clone_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001603
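/*
 * Example (sketch): per the comment above, callers must drop the bh
 * lock themselves once the clone is usable, including on their own
 * error paths. Roughly what inet_csk_clone_lock()-style users do:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (!newsk)
 *		return NULL;
 *	... protocol-specific setup of newsk ...
 *	bh_unlock_sock(newsk);
 */
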
Andi Kleen99580892007-04-20 17:12:43 -07001604void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1605{
Eric Dumazetd6a4e262015-05-26 08:55:28 -07001606 u32 max_segs = 1;
1607
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001608 sk_dst_set(sk, dst);
Andi Kleen99580892007-04-20 17:12:43 -07001609 sk->sk_route_caps = dst->dev->features;
1610 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001611 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001612 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001613 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001614 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001615 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001616 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001617 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001618 sk->sk_gso_max_size = dst->dev->gso_max_size;
Eric Dumazetd6a4e262015-05-26 08:55:28 -07001619 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001620 }
Andi Kleen99580892007-04-20 17:12:43 -07001621 }
Eric Dumazetd6a4e262015-05-26 08:55:28 -07001622 sk->sk_gso_max_segs = max_segs;
Andi Kleen99580892007-04-20 17:12:43 -07001623}
1624EXPORT_SYMBOL_GPL(sk_setup_caps);
1625
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626/*
1627 * Simple resource managers for sockets.
1628 */
1629
1630
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001631/*
1632 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 */
1634void sock_wfree(struct sk_buff *skb)
1635{
1636 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001637 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638
Eric Dumazetd99927f2009-09-24 10:49:24 +00001639 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1640 /*
1641 * Keep a reference on sk_wmem_alloc, this will be released
1642 * after sk_write_space() call
1643 */
1644 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001646 len = 1;
1647 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001648 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001649 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1650 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001651 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001652 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001653 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654}
Eric Dumazet2a915252009-05-27 11:30:05 +00001655EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656
Eric Dumazet1d2077a2016-05-02 10:56:27 -07001657/* This variant of sock_wfree() is used by TCP,
1658 * since it sets SOCK_USE_WRITE_QUEUE.
1659 */
1660void __sock_wfree(struct sk_buff *skb)
1661{
1662 struct sock *sk = skb->sk;
1663
1664 if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1665 __sk_free(sk);
1666}
1667
Eric Dumazet9e17f8a2015-11-01 15:36:55 -08001668void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1669{
1670 skb_orphan(skb);
1671 skb->sk = sk;
1672#ifdef CONFIG_INET
1673 if (unlikely(!sk_fullsock(sk))) {
1674 skb->destructor = sock_edemux;
1675 sock_hold(sk);
1676 return;
1677 }
1678#endif
1679 skb->destructor = sock_wfree;
1680 skb_set_hash_from_sk(skb, sk);
1681 /*
1682	 * We used to take a refcount on sk, but the following operation
1683	 * is enough to guarantee sk_free() won't free this sock until
1684 * all in-flight packets are completed
1685 */
1686 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1687}
1688EXPORT_SYMBOL(skb_set_owner_w);
1689
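/*
 * Example (sketch): transmit paths charge a freshly allocated skb to
 * the sending socket, so that kfree_skb() ends up in sock_wfree():
 *
 *	skb = alloc_skb(size, GFP_KERNEL);
 *	if (skb)
 *		skb_set_owner_w(skb, sk);
 *
 * sock_wmalloc() later in this file is essentially this pattern plus
 * a send-buffer limit check.
 */
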
Eric Dumazet1d2077a2016-05-02 10:56:27 -07001690/* This helper is used by netem, as it can hold packets in its
1691 * delay queue. We want to allow the owner socket to send more
1692 * packets, as if they were already TX completed by a typical driver.
1693 * But we also want to keep skb->sk set because some packet schedulers
Eric Dumazet5d165da2017-05-11 15:24:41 -07001694 * rely on it (sch_fq for example).
Eric Dumazet1d2077a2016-05-02 10:56:27 -07001695 */
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001696void skb_orphan_partial(struct sk_buff *skb)
1697{
Eric Dumazet5d165da2017-05-11 15:24:41 -07001698 if (skb_is_tcp_pure_ack(skb))
Eric Dumazet1d2077a2016-05-02 10:56:27 -07001699 return;
1700
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001701 if (skb->destructor == sock_wfree
1702#ifdef CONFIG_INET
1703 || skb->destructor == tcp_wfree
1704#endif
1705 ) {
Eric Dumazet5d165da2017-05-11 15:24:41 -07001706 struct sock *sk = skb->sk;
1707
1708 if (atomic_inc_not_zero(&sk->sk_refcnt)) {
1709 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1710 skb->destructor = sock_efree;
1711 }
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001712 } else {
1713 skb_orphan(skb);
1714 }
1715}
1716EXPORT_SYMBOL(skb_orphan_partial);
1717
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001718/*
1719 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 */
1721void sock_rfree(struct sk_buff *skb)
1722{
1723 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001724 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725
Eric Dumazetd361fd52010-07-10 22:45:17 +00001726 atomic_sub(len, &sk->sk_rmem_alloc);
1727 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728}
Eric Dumazet2a915252009-05-27 11:30:05 +00001729EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730
Oliver Hartkopp7768eed2015-03-10 19:03:46 +01001731/*
1732 * Buffer destructor for skbs that are not used directly in read or write
1733 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1734 */
Alexander Duyck62bccb82014-09-04 13:31:35 -04001735void sock_efree(struct sk_buff *skb)
1736{
1737 sock_put(skb->sk);
1738}
1739EXPORT_SYMBOL(sock_efree);
1740
Eric W. Biederman976d02012012-05-23 17:16:53 -06001741kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001743 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Eric Dumazetf064af12010-09-22 12:43:39 +00001745 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001746 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001747 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 return uid;
1749}
Eric Dumazet2a915252009-05-27 11:30:05 +00001750EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
1752unsigned long sock_i_ino(struct sock *sk)
1753{
1754 unsigned long ino;
1755
Eric Dumazetf064af12010-09-22 12:43:39 +00001756 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001758 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 return ino;
1760}
Eric Dumazet2a915252009-05-27 11:30:05 +00001761EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
1763/*
1764 * Allocate a skb from the socket's send buffer.
1765 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001766struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001767 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768{
1769 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001770 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 if (skb) {
1772 skb_set_owner_w(skb, sk);
1773 return skb;
1774 }
1775 }
1776 return NULL;
1777}
Eric Dumazet2a915252009-05-27 11:30:05 +00001778EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
1780/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001782 */
Al Virodd0fc662005-10-07 07:46:04 +01001783void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784{
Eric Dumazet95c96172012-04-15 05:58:06 +00001785 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1787 void *mem;
1788		/* Do the add first, to avoid a race while kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001789		 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 */
1791 atomic_add(size, &sk->sk_omem_alloc);
1792 mem = kmalloc(size, priority);
1793 if (mem)
1794 return mem;
1795 atomic_sub(size, &sk->sk_omem_alloc);
1796 }
1797 return NULL;
1798}
Eric Dumazet2a915252009-05-27 11:30:05 +00001799EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
Daniel Borkmann79e88652014-11-19 17:13:11 +01001801/* Free an option memory block. Note: we actually want the inline
1802 * here as this allows gcc to detect the nullify and fold away the
1803 * condition entirely.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 */
Daniel Borkmann79e88652014-11-19 17:13:11 +01001805static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1806 const bool nullify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807{
David S. Millere53da5f2014-10-14 17:02:37 -04001808 if (WARN_ON_ONCE(!mem))
1809 return;
Daniel Borkmann79e88652014-11-19 17:13:11 +01001810 if (nullify)
1811 kzfree(mem);
1812 else
1813 kfree(mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 atomic_sub(size, &sk->sk_omem_alloc);
1815}
Daniel Borkmann79e88652014-11-19 17:13:11 +01001816
1817void sock_kfree_s(struct sock *sk, void *mem, int size)
1818{
1819 __sock_kfree_s(sk, mem, size, false);
1820}
Eric Dumazet2a915252009-05-27 11:30:05 +00001821EXPORT_SYMBOL(sock_kfree_s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
Daniel Borkmann79e88652014-11-19 17:13:11 +01001823void sock_kzfree_s(struct sock *sk, void *mem, int size)
1824{
1825 __sock_kfree_s(sk, mem, size, true);
1826}
1827EXPORT_SYMBOL(sock_kzfree_s);
1828
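/*
 * Example (sketch): option memory must be uncharged with the same size
 * it was charged with, so callers pair sock_kmalloc() with
 * sock_kfree_s() (or sock_kzfree_s() for key material):
 *
 *	void *opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *
 *	if (!opt)
 *		return -ENOBUFS;
 *	... use opt ...
 *	sock_kfree_s(sk, opt, optlen);
 */
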
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1830   I think these locks should be removed for datagram sockets.
1831 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001832static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833{
1834 DEFINE_WAIT(wait);
1835
Eric Dumazet9cd3e072015-11-29 20:03:10 -08001836 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 for (;;) {
1838 if (!timeo)
1839 break;
1840 if (signal_pending(current))
1841 break;
1842 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001843 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1845 break;
1846 if (sk->sk_shutdown & SEND_SHUTDOWN)
1847 break;
1848 if (sk->sk_err)
1849 break;
1850 timeo = schedule_timeout(timeo);
1851 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001852 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 return timeo;
1854}
1855
1856
1857/*
1858 * Generic send/receive buffer handlers
1859 */
1860
Herbert Xu4cc7f682009-02-04 16:55:54 -08001861struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1862 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07001863 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001865 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 long timeo;
1867 int err;
1868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001870 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 err = sock_error(sk);
1872 if (err != 0)
1873 goto failure;
1874
1875 err = -EPIPE;
1876 if (sk->sk_shutdown & SEND_SHUTDOWN)
1877 goto failure;
1878
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001879 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1880 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07001881
Eric Dumazet9cd3e072015-11-29 20:03:10 -08001882 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001883 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1884 err = -EAGAIN;
1885 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001887 if (signal_pending(current))
1888 goto interrupted;
1889 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001891 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1892 errcode, sk->sk_allocation);
1893 if (skb)
1894 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 return skb;
1896
1897interrupted:
1898 err = sock_intr_errno(timeo);
1899failure:
1900 *errcode = err;
1901 return NULL;
1902}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001903EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001905struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 int noblock, int *errcode)
1907{
Eric Dumazet28d64272013-08-08 14:38:47 -07001908 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909}
Eric Dumazet2a915252009-05-27 11:30:05 +00001910EXPORT_SYMBOL(sock_alloc_send_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
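/*
 * Example (sketch): a datagram sendmsg() implementation typically
 * blocks on send-buffer space through this helper; "reserve" is a
 * hypothetical stand-in for whatever header room the caller needs:
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out_err;
 */
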
Willem de Bruijn39771b12016-04-02 23:08:06 -04001912int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1913 struct sockcm_cookie *sockc)
1914{
Soheil Hassas Yeganeh3dd17e62016-04-02 23:08:09 -04001915 u32 tsflags;
1916
Willem de Bruijn39771b12016-04-02 23:08:06 -04001917 switch (cmsg->cmsg_type) {
1918 case SO_MARK:
1919 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1920 return -EPERM;
1921 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1922 return -EINVAL;
1923 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
1924 break;
Soheil Hassas Yeganeh3dd17e62016-04-02 23:08:09 -04001925 case SO_TIMESTAMPING:
1926 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1927 return -EINVAL;
1928
1929 tsflags = *(u32 *)CMSG_DATA(cmsg);
1930 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
1931 return -EINVAL;
1932
1933 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
1934 sockc->tsflags |= tsflags;
1935 break;
Soheil Hassas Yeganeh779f1ed2016-07-11 16:51:26 -04001936 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
1937 case SCM_RIGHTS:
1938 case SCM_CREDENTIALS:
1939 break;
Willem de Bruijn39771b12016-04-02 23:08:06 -04001940 default:
1941 return -EINVAL;
1942 }
1943 return 0;
1944}
1945EXPORT_SYMBOL(__sock_cmsg_send);
1946
Edward Jeef28ea362015-10-08 14:56:48 -07001947int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1948 struct sockcm_cookie *sockc)
1949{
1950 struct cmsghdr *cmsg;
Willem de Bruijn39771b12016-04-02 23:08:06 -04001951 int ret;
Edward Jeef28ea362015-10-08 14:56:48 -07001952
1953 for_each_cmsghdr(cmsg, msg) {
1954 if (!CMSG_OK(msg, cmsg))
1955 return -EINVAL;
1956 if (cmsg->cmsg_level != SOL_SOCKET)
1957 continue;
Willem de Bruijn39771b12016-04-02 23:08:06 -04001958 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
1959 if (ret)
1960 return ret;
Edward Jeef28ea362015-10-08 14:56:48 -07001961 }
1962 return 0;
1963}
1964EXPORT_SYMBOL(sock_cmsg_send);
1965
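/*
 * Example (sketch): sendmsg() paths initialise a sockcm_cookie from
 * the socket defaults and then let SOL_SOCKET cmsgs override it:
 *
 *	struct sockcm_cookie sockc = { .tsflags = sk->sk_tsflags };
 *
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (unlikely(err))
 *			return err;
 *	}
 */
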
Eric Dumazet5640f762012-09-23 23:04:42 +00001966/* On 32bit arches, an skb frag is limited to 2^15 bytes */
1967#define SKB_FRAG_PAGE_ORDER get_order(32768)
1968
Eric Dumazet400dfd32013-10-17 16:27:07 -07001969/**
1970 * skb_page_frag_refill - check that a page_frag contains enough room
1971 * @sz: minimum size of the fragment we want to get
1972 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07001973 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07001974 *
1975 * Note: While this allocator tries to use high order pages, there is
1976 * no guarantee that allocations succeed. Therefore, @sz MUST be
1977 * less than or equal to PAGE_SIZE.
1978 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07001979bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00001980{
Eric Dumazet5640f762012-09-23 23:04:42 +00001981 if (pfrag->page) {
Joonsoo Kimfe896d12016-03-17 14:19:26 -07001982 if (page_ref_count(pfrag->page) == 1) {
Eric Dumazet5640f762012-09-23 23:04:42 +00001983 pfrag->offset = 0;
1984 return true;
1985 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001986 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00001987 return true;
1988 put_page(pfrag->page);
1989 }
1990
Eric Dumazetd9b29382014-08-27 20:49:34 -07001991 pfrag->offset = 0;
1992 if (SKB_FRAG_PAGE_ORDER) {
Mel Gormand0164ad2015-11-06 16:28:21 -08001993 /* Avoid direct reclaim but allow kswapd to wake */
1994 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
1995 __GFP_COMP | __GFP_NOWARN |
1996 __GFP_NORETRY,
Eric Dumazetd9b29382014-08-27 20:49:34 -07001997 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00001998 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07001999 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00002000 return true;
2001 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07002002 }
2003 pfrag->page = alloc_page(gfp);
2004 if (likely(pfrag->page)) {
2005 pfrag->size = PAGE_SIZE;
2006 return true;
2007 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07002008 return false;
2009}
2010EXPORT_SYMBOL(skb_page_frag_refill);
2011
2012bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2013{
2014 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2015 return true;
2016
Eric Dumazet5640f762012-09-23 23:04:42 +00002017 sk_enter_memory_pressure(sk);
2018 sk_stream_moderate_sndbuf(sk);
2019 return false;
2020}
2021EXPORT_SYMBOL(sk_page_frag_refill);
2022
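/*
 * Example (sketch): stream protocols append user data to the page
 * frag returned by sk_page_frag() (an include/net/sock.h helper that
 * picks the per-task or per-socket frag), advancing the offset on
 * success:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, len, pfrag->size - pfrag->offset);
 *	... copy "copy" bytes into pfrag->page at pfrag->offset ...
 *	pfrag->offset += copy;
 */
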
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00002024 __releases(&sk->sk_lock.slock)
2025 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
2027 DEFINE_WAIT(wait);
2028
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002029 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2031 TASK_UNINTERRUPTIBLE);
2032 spin_unlock_bh(&sk->sk_lock.slock);
2033 schedule();
2034 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002035 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 break;
2037 }
2038 finish_wait(&sk->sk_lock.wq, &wait);
2039}
2040
2041static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00002042 __releases(&sk->sk_lock.slock)
2043 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044{
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002045 struct sk_buff *skb, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002047 while ((skb = sk->sk_backlog.head) != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002049
2050 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
2052 do {
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002053 next = skb->next;
Eric Dumazete4cbb022012-04-30 16:07:09 +00002054 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00002055 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07002057 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002059 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
2061 skb = next;
2062 } while (skb != NULL);
2063
Eric Dumazet5413d1b2016-04-29 14:16:52 -07002064 spin_lock_bh(&sk->sk_lock.slock);
2065 }
Zhu Yi8eae9392010-03-04 18:01:40 +00002066
2067 /*
2068	 * Doing the zeroing here guarantees we cannot loop forever
2069 * while a wild producer attempts to flood us.
2070 */
2071 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072}
2073
Eric Dumazetd41a69f2016-04-29 14:16:53 -07002074void __sk_flush_backlog(struct sock *sk)
2075{
2076 spin_lock_bh(&sk->sk_lock.slock);
2077 __release_sock(sk);
2078 spin_unlock_bh(&sk->sk_lock.slock);
2079}
2080
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081/**
2082 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07002083 * @sk: sock to wait on
2084 * @timeo: for how long
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002085 * @skb: last skb seen on sk_receive_queue
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 *
2087 * Now socket state including sk->sk_err is changed only under lock,
2088 * hence we may omit checks after joining the wait queue.
2089 * We check the receive queue before schedule() only as an optimization;
2090 * it is very likely that release_sock() added new data.
2091 */
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002092int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093{
2094 int rc;
2095 DEFINE_WAIT(wait);
2096
Eric Dumazetaa395142010-04-20 13:03:51 +00002097 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002098 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
Sabrina Dubrocadfbafc92015-07-24 18:19:25 +02002099 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
Eric Dumazet9cd3e072015-11-29 20:03:10 -08002100 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
Eric Dumazetaa395142010-04-20 13:03:51 +00002101 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 return rc;
2103}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104EXPORT_SYMBOL(sk_wait_data);
2105
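/*
 * Example (sketch): a blocking recvmsg() loop built on sk_wait_data(),
 * called with the socket lock held. Real callers also recheck
 * sk->sk_err and shutdown state, omitted here for brevity:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */
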
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002106/**
2107 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2108 * @sk: socket
2109 * @size: memory size to allocate
2110 * @kind: allocation type
2111 *
2112 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2113 * rmem allocation. This function assumes that protocols which have
2114 * memory_pressure use sk_wmem_queued as write buffer accounting.
2115 */
2116int __sk_mem_schedule(struct sock *sk, int size, int kind)
2117{
2118 struct proto *prot = sk->sk_prot;
2119 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00002120 long allocated;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002121
2122 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002123
Johannes Weinere8056052016-01-14 15:21:14 -08002124 allocated = sk_memory_allocated_add(sk, amt);
2125
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002126 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2127 !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
Johannes Weinere8056052016-01-14 15:21:14 -08002128 goto suppress_allocation;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002129
2130 /* Under limit. */
Johannes Weinere8056052016-01-14 15:21:14 -08002131 if (allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00002132 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002133 return 1;
2134 }
2135
Johannes Weinere8056052016-01-14 15:21:14 -08002136 /* Under pressure. */
2137 if (allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00002138 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002139
Johannes Weinere8056052016-01-14 15:21:14 -08002140 /* Over hard limit. */
2141 if (allocated > sk_prot_mem_limits(sk, 2))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002142 goto suppress_allocation;
2143
2144 /* guarantee minimum buffer size under pressure */
2145 if (kind == SK_MEM_RECV) {
2146 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2147 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002148
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002149 } else { /* SK_MEM_SEND */
2150 if (sk->sk_type == SOCK_STREAM) {
2151 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2152 return 1;
2153 } else if (atomic_read(&sk->sk_wmem_alloc) <
2154 prot->sysctl_wmem[0])
2155 return 1;
2156 }
2157
Glauber Costa180d8cd2011-12-11 21:47:02 +00002158 if (sk_has_memory_pressure(sk)) {
Eric Dumazet9d3fcde2019-02-12 12:26:27 -08002159 u64 alloc;
Eric Dumazet17483762008-11-25 21:16:35 -08002160
Glauber Costa180d8cd2011-12-11 21:47:02 +00002161 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002162 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002163 alloc = sk_sockets_allocated_read_positive(sk);
2164 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002165 sk_mem_pages(sk->sk_wmem_queued +
2166 atomic_read(&sk->sk_rmem_alloc) +
2167 sk->sk_forward_alloc))
2168 return 1;
2169 }
2170
2171suppress_allocation:
2172
2173 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2174 sk_stream_moderate_sndbuf(sk);
2175
2176 /* Fail only if socket is _under_ its sndbuf.
2177 * In this case we cannot block, so that we have to fail.
2178 */
2179 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2180 return 1;
2181 }
2182
Satoru Moriya3847ce32011-06-17 12:00:03 +00002183 trace_sock_exceed_buf_limit(sk, prot, allocated);
2184
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002185 /* Alas. Undo changes. */
2186 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002187
Glauber Costa0e90b312012-01-20 04:57:16 +00002188 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002189
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002190 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2191 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
Johannes Weinere8056052016-01-14 15:21:14 -08002192
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002193 return 0;
2194}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002195EXPORT_SYMBOL(__sk_mem_schedule);
2196
2197/**
Jean Sacren69dba9b2015-08-27 18:05:49 -06002198 * __sk_mem_reclaim - reclaim memory_allocated
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002199 * @sk: socket
Eric Dumazet1a24e042015-05-15 12:39:25 -07002200 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002201 */
Eric Dumazet1a24e042015-05-15 12:39:25 -07002202void __sk_mem_reclaim(struct sock *sk, int amount)
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002203{
Eric Dumazet1a24e042015-05-15 12:39:25 -07002204 amount >>= SK_MEM_QUANTUM_SHIFT;
2205 sk_memory_allocated_sub(sk, amount);
2206 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002207
Johannes Weinerbaac50b2016-01-14 15:21:17 -08002208 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2209 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
Johannes Weinere8056052016-01-14 15:21:14 -08002210
Glauber Costa180d8cd2011-12-11 21:47:02 +00002211 if (sk_under_memory_pressure(sk) &&
2212 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2213 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002214}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002215EXPORT_SYMBOL(__sk_mem_reclaim);
2216
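/*
 * Example (sketch): protocols normally go through the
 * sk_wmem_schedule()/sk_rmem_schedule() and sk_mem_charge() wrappers
 * in include/net/sock.h rather than calling __sk_mem_schedule()
 * directly:
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		goto drop;
 *	sk_mem_charge(sk, skb->truesize);
 */
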
samanthakumar627d2d62016-04-05 12:41:16 -04002217int sk_set_peek_off(struct sock *sk, int val)
2218{
2219 if (val < 0)
2220 return -EINVAL;
2221
2222 sk->sk_peek_off = val;
2223 return 0;
2224}
2225EXPORT_SYMBOL_GPL(sk_set_peek_off);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002226
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227/*
2228 * Set of default routines for initialising struct proto_ops when
2229 * the protocol does not support a particular function. In certain
2230 * cases where it makes no sense for a protocol to have a "do nothing"
2231 * function, some default processing is provided.
2232 */
2233
2234int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2235{
2236 return -EOPNOTSUPP;
2237}
Eric Dumazet2a915252009-05-27 11:30:05 +00002238EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002240int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 int len, int flags)
2242{
2243 return -EOPNOTSUPP;
2244}
Eric Dumazet2a915252009-05-27 11:30:05 +00002245EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246
2247int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2248{
2249 return -EOPNOTSUPP;
2250}
Eric Dumazet2a915252009-05-27 11:30:05 +00002251EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252
2253int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2254{
2255 return -EOPNOTSUPP;
2256}
Eric Dumazet2a915252009-05-27 11:30:05 +00002257EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002259int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 int *len, int peer)
2261{
2262 return -EOPNOTSUPP;
2263}
Eric Dumazet2a915252009-05-27 11:30:05 +00002264EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
Eric Dumazet2a915252009-05-27 11:30:05 +00002266unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267{
2268 return 0;
2269}
Eric Dumazet2a915252009-05-27 11:30:05 +00002270EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
2272int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2273{
2274 return -EOPNOTSUPP;
2275}
Eric Dumazet2a915252009-05-27 11:30:05 +00002276EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277
2278int sock_no_listen(struct socket *sock, int backlog)
2279{
2280 return -EOPNOTSUPP;
2281}
Eric Dumazet2a915252009-05-27 11:30:05 +00002282EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283
2284int sock_no_shutdown(struct socket *sock, int how)
2285{
2286 return -EOPNOTSUPP;
2287}
Eric Dumazet2a915252009-05-27 11:30:05 +00002288EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289
2290int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002291 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292{
2293 return -EOPNOTSUPP;
2294}
Eric Dumazet2a915252009-05-27 11:30:05 +00002295EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
2297int sock_no_getsockopt(struct socket *sock, int level, int optname,
2298 char __user *optval, int __user *optlen)
2299{
2300 return -EOPNOTSUPP;
2301}
Eric Dumazet2a915252009-05-27 11:30:05 +00002302EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Ying Xue1b784142015-03-02 15:37:48 +08002304int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305{
2306 return -EOPNOTSUPP;
2307}
Eric Dumazet2a915252009-05-27 11:30:05 +00002308EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309
Ying Xue1b784142015-03-02 15:37:48 +08002310int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2311 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312{
2313 return -EOPNOTSUPP;
2314}
Eric Dumazet2a915252009-05-27 11:30:05 +00002315EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
2317int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2318{
2319 /* Mirror missing mmap method error code */
2320 return -ENODEV;
2321}
Eric Dumazet2a915252009-05-27 11:30:05 +00002322EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
2324ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2325{
2326 ssize_t res;
2327 struct msghdr msg = {.msg_flags = flags};
2328 struct kvec iov;
2329 char *kaddr = kmap(page);
2330 iov.iov_base = kaddr + offset;
2331 iov.iov_len = size;
2332 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2333 kunmap(page);
2334 return res;
2335}
Eric Dumazet2a915252009-05-27 11:30:05 +00002336EXPORT_SYMBOL(sock_no_sendpage);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337
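/*
 * Example (sketch): a proto_ops that leaves most operations
 * unsupported simply points them at these stubs; "my_ops" and
 * PF_SOMETHING are hypothetical (af_packet.c's packet_ops is a real
 * user of several of these defaults):
 *
 *	static const struct proto_ops my_ops = {
 *		.family		= PF_SOMETHING,
 *		.connect	= sock_no_connect,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.mmap		= sock_no_mmap,
 *	};
 */
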
2338/*
2339 * Default Socket Callbacks
2340 */
2341
2342static void sock_def_wakeup(struct sock *sk)
2343{
Eric Dumazet43815482010-04-29 11:01:49 +00002344 struct socket_wq *wq;
2345
2346 rcu_read_lock();
2347 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002348 if (skwq_has_sleeper(wq))
Eric Dumazet43815482010-04-29 11:01:49 +00002349 wake_up_interruptible_all(&wq->wait);
2350 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351}
2352
2353static void sock_def_error_report(struct sock *sk)
2354{
Eric Dumazet43815482010-04-29 11:01:49 +00002355 struct socket_wq *wq;
2356
2357 rcu_read_lock();
2358 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002359 if (skwq_has_sleeper(wq))
Eric Dumazet43815482010-04-29 11:01:49 +00002360 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002361 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002362 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363}
2364
David S. Miller676d2362014-04-11 16:15:36 -04002365static void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366{
Eric Dumazet43815482010-04-29 11:01:49 +00002367 struct socket_wq *wq;
2368
2369 rcu_read_lock();
2370 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002371 if (skwq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002372 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002373 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002374 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002375 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376}
2377
2378static void sock_def_write_space(struct sock *sk)
2379{
Eric Dumazet43815482010-04-29 11:01:49 +00002380 struct socket_wq *wq;
2381
2382 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
2384 /* Do not wake up a writer until he can make "significant"
2385 * progress. --DaveM
2386 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002387 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002388 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002389 if (skwq_has_sleeper(wq))
Eric Dumazet43815482010-04-29 11:01:49 +00002390 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002391 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392
2393 /* Should agree with poll, otherwise some programs break */
2394 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002395 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 }
2397
Eric Dumazet43815482010-04-29 11:01:49 +00002398 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399}
2400
2401static void sock_def_destruct(struct sock *sk)
2402{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403}
2404
2405void sk_send_sigurg(struct sock *sk)
2406{
2407 if (sk->sk_socket && sk->sk_socket->file)
2408 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002409 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410}
Eric Dumazet2a915252009-05-27 11:30:05 +00002411EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412
2413void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2414 unsigned long expires)
2415{
2416 if (!mod_timer(timer, expires))
2417 sock_hold(sk);
2418}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419EXPORT_SYMBOL(sk_reset_timer);
2420
2421void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2422{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002423 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 __sock_put(sk);
2425}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426EXPORT_SYMBOL(sk_stop_timer);
2427
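/*
 * Example (sketch): since sk_reset_timer() takes a sock reference when
 * it arms the timer, the (hypothetical) timer callback must drop it:
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *	...
 *	static void my_timer(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *		...
 *		sock_put(sk);
 *	}
 */
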
2428void sock_init_data(struct socket *sock, struct sock *sk)
2429{
2430 skb_queue_head_init(&sk->sk_receive_queue);
2431 skb_queue_head_init(&sk->sk_write_queue);
2432 skb_queue_head_init(&sk->sk_error_queue);
2433
2434 sk->sk_send_head = NULL;
2435
2436 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002437
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 sk->sk_allocation = GFP_KERNEL;
2439 sk->sk_rcvbuf = sysctl_rmem_default;
2440 sk->sk_sndbuf = sysctl_wmem_default;
2441 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002442 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
2444 sock_set_flag(sk, SOCK_ZAPPED);
2445
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002446 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002448 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 sock->sk = sk;
2450 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002451 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002454 lockdep_set_class_and_name(&sk->sk_callback_lock,
2455 af_callback_keys + sk->sk_family,
2456 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457
2458 sk->sk_state_change = sock_def_wakeup;
2459 sk->sk_data_ready = sock_def_readable;
2460 sk->sk_write_space = sock_def_write_space;
2461 sk->sk_error_report = sock_def_error_report;
2462 sk->sk_destruct = sock_def_destruct;
2463
Eric Dumazet5640f762012-09-23 23:04:42 +00002464 sk->sk_frag.page = NULL;
2465 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002466 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002468 sk->sk_peer_pid = NULL;
2469 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 sk->sk_write_pending = 0;
2471 sk->sk_rcvlowat = 1;
2472 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2473 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2474
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002475 sk->sk_stamp = ktime_set(-1L, 0);
Deepa Dinamani7abb7f72018-12-27 18:55:09 -08002476#if BITS_PER_LONG==32
2477 seqlock_init(&sk->sk_stamp_seq);
2478#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479
Cong Wange0d10952013-08-01 11:10:25 +08002480#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03002481 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002482 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002483#endif
2484
Eric Dumazet62748f32013-09-24 08:20:52 -07002485 sk->sk_max_pacing_rate = ~0U;
Eric Dumazet7eec4172013-10-08 15:16:00 -07002486 sk->sk_pacing_rate = ~0U;
Eric Dumazet70da2682015-10-08 19:33:21 -07002487 sk->sk_incoming_cpu = -1;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002488 /*
2489 * Before updating sk_refcnt, we must commit prior changes to memory
2490 * (Documentation/RCU/rculist_nulls.txt for details)
2491 */
2492 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002494 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495}
Eric Dumazet2a915252009-05-27 11:30:05 +00002496EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002498void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499{
2500 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002501 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002502 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002504 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002505 spin_unlock(&sk->sk_lock.slock);
2506 /*
2507 * The sk_lock has mutex_lock() semantics here:
2508 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002509 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002510 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002512EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002514void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002516 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 if (sk->sk_backlog.tail)
2518 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002519
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002520 /* Warning: release_cb() might need to release sk ownership,
2521 * i.e. call sock_release_ownership(sk) before us.
2522 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002523 if (sk->sk_prot->release_cb)
2524 sk->sk_prot->release_cb(sk);
2525
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002526 sock_release_ownership(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002527 if (waitqueue_active(&sk->sk_lock.wq))
2528 wake_up(&sk->sk_lock.wq);
2529 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530}
2531EXPORT_SYMBOL(release_sock);
2532
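/*
 * Sketch of the usual pairing, for illustration only ("example_locked_update"
 * is hypothetical): process context wraps accesses to protocol state in
 * lock_sock()/release_sock(), and release_sock() then flushes whatever the
 * softirq receiver queued on the backlog in the meantime.
 */
static void example_locked_update(struct sock *sk, int val)
{
	lock_sock(sk);
	sk->sk_rcvlowat = val ? : 1;	/* some update that needs the lock */
	release_sock(sk);
}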
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002533/**
2534 * lock_sock_fast - fast version of lock_sock
2535 * @sk: socket
2536 *
2537 * This version should be used for very small sections, where the process
2538 * won't block. Returns false if the fast path was taken:
2539 *   sk_lock.slock locked, owned = 0, BH disabled
2540 * Returns true if the slow path was taken:
2541 *   sk_lock.slock unlocked, owned = 1, BH enabled
2542 */
2543bool lock_sock_fast(struct sock *sk)
2544{
2545 might_sleep();
2546 spin_lock_bh(&sk->sk_lock.slock);
2547
2548 if (!sk->sk_lock.owned)
2549 /*
2550 * Note : the fast path returns with BH disabled; unlock_sock_fast() re-enables them
2551 */
2552 return false;
2553
2554 __lock_sock(sk);
2555 sk->sk_lock.owned = 1;
2556 spin_unlock(&sk->sk_lock.slock);
2557 /*
2558 * The sk_lock has mutex_lock() semantics here:
2559 */
2560 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2561 local_bh_enable();
2562 return true;
2563}
2564EXPORT_SYMBOL(lock_sock_fast);
2565
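/*
 * Sketch, for illustration only: callers pair lock_sock_fast() with
 * unlock_sock_fast() (include/net/sock.h), handing back the returned
 * slow-path flag. "example_first_len" is a hypothetical helper.
 */
static int example_first_len(struct sock *sk)
{
	struct sk_buff_head *q = &sk->sk_receive_queue;
	struct sk_buff *skb;
	bool slow;
	int len = 0;

	slow = lock_sock_fast(sk);
	spin_lock_bh(&q->lock);		/* the queue has its own lock */
	skb = skb_peek(q);
	if (skb)
		len = skb->len;
	spin_unlock_bh(&q->lock);
	unlock_sock_fast(sk, slow);
	return len;
}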
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002567{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002568 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002570 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002571 tv = ktime_to_timeval(sk->sk_stamp);
2572 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002574 if (tv.tv_sec == 0) {
2575 sk->sk_stamp = ktime_get_real();
2576 tv = ktime_to_timeval(sk->sk_stamp);
2577 }
2578 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002579}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580EXPORT_SYMBOL(sock_get_timestamp);
2581
Eric Dumazetae40eb12007-03-18 17:33:16 -07002582int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2583{
2584 struct timespec ts;
2585 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002586 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002587 ts = ktime_to_timespec(sk->sk_stamp);
2588 if (ts.tv_sec == -1)
2589 return -ENOENT;
2590 if (ts.tv_sec == 0) {
2591 sk->sk_stamp = ktime_get_real();
2592 ts = ktime_to_timespec(sk->sk_stamp);
2593 }
2594 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2595}
2596EXPORT_SYMBOL(sock_get_timestampns);
2597
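/*
 * From user space, these two helpers back the SIOCGSTAMP/SIOCGSTAMPNS
 * ioctls, reached through each protocol's ->ioctl handler. A minimal
 * user-space sketch (includes and error handling trimmed), assuming fd
 * has already received at least one packet:
 *
 *	struct timeval tv;
 *
 *	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *		printf("last rx at %ld.%06ld\n",
 *		       (long)tv.tv_sec, (long)tv.tv_usec);
 */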
Patrick Ohly20d49472009-02-12 05:03:38 +00002598void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002599{
Patrick Ohly20d49472009-02-12 05:03:38 +00002600 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002601 unsigned long previous_flags = sk->sk_flags;
2602
Patrick Ohly20d49472009-02-12 05:03:38 +00002603 sock_set_flag(sk, flag);
2604 /*
2605 * We just set one of the two flags which require net
2606 * time stamping, but time stamping might already have been
2607 * enabled because of the other one.
2608 */
Hannes Frederic Sowa080a2702015-10-26 13:51:37 +01002609 if (sock_needs_netstamp(sk) &&
2610 !(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002611 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 }
2613}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
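/*
 * One common way this path is armed is the SO_TIMESTAMP socket option,
 * whose setsockopt handler ends up calling sock_enable_timestamp().
 * User-space sketch (error handling trimmed); afterwards each recvmsg()
 * carries an SCM_TIMESTAMP control message:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
 */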
Richard Cochrancb820f82013-07-19 19:40:09 +02002615int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2616 int level, int type)
2617{
2618 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002619 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02002620 int copied, err;
2621
2622 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002623 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02002624 if (skb == NULL)
2625 goto out;
2626
2627 copied = skb->len;
2628 if (copied > len) {
2629 msg->msg_flags |= MSG_TRUNC;
2630 copied = len;
2631 }
David S. Miller51f3d022014-11-05 16:46:40 -05002632 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02002633 if (err)
2634 goto out_free_skb;
2635
2636 sock_recv_timestamp(msg, sk, skb);
2637
2638 serr = SKB_EXT_ERR(skb);
2639 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2640
2641 msg->msg_flags |= MSG_ERRQUEUE;
2642 err = copied;
2643
Richard Cochrancb820f82013-07-19 19:40:09 +02002644out_free_skb:
2645 kfree_skb(skb);
2646out:
2647 return err;
2648}
2649EXPORT_SYMBOL(sock_recv_errqueue);
2650
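/*
 * The user-space counterpart drains the error queue with MSG_ERRQUEUE.
 * A minimal sketch, assuming a socket with e.g. IP_RECVERR enabled
 * (includes and error handling trimmed):
 *
 *	char data[256], control[256];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = control, .msg_controllen = sizeof(control),
 *	};
 *	struct sock_extended_err *ee;
 *	struct cmsghdr *cmsg;
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		ee = (struct sock_extended_err *)CMSG_DATA(cmsg);
 *		printf("origin %u errno %u\n", ee->ee_origin, ee->ee_errno);
 *	}
 */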
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651/*
2652 * Get a socket option on a socket.
2653 *
2654 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2655 * asynchronous errors should be reported by getsockopt. We assume
2656 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2657 */
2658int sock_common_getsockopt(struct socket *sock, int level, int optname,
2659 char __user *optval, int __user *optlen)
2660{
2661 struct sock *sk = sock->sk;
2662
2663 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2664}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665EXPORT_SYMBOL(sock_common_getsockopt);
2666
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002667#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002668int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2669 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002670{
2671 struct sock *sk = sock->sk;
2672
Johannes Berg1e51f952007-03-06 13:44:06 -08002673 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002674 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2675 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002676 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2677}
2678EXPORT_SYMBOL(compat_sock_common_getsockopt);
2679#endif
2680
Ying Xue1b784142015-03-02 15:37:48 +08002681int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2682 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683{
2684 struct sock *sk = sock->sk;
2685 int addr_len = 0;
2686 int err;
2687
Ying Xue1b784142015-03-02 15:37:48 +08002688 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 flags & ~MSG_DONTWAIT, &addr_len);
2690 if (err >= 0)
2691 msg->msg_namelen = addr_len;
2692 return err;
2693}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694EXPORT_SYMBOL(sock_common_recvmsg);
2695
2696/*
2697 * Set socket options on an inet socket.
2698 */
2699int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002700 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701{
2702 struct sock *sk = sock->sk;
2703
2704 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2705}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706EXPORT_SYMBOL(sock_common_setsockopt);
2707
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002708#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002709int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002710 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002711{
2712 struct sock *sk = sock->sk;
2713
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002714 if (sk->sk_prot->compat_setsockopt != NULL)
2715 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2716 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002717 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2718}
2719EXPORT_SYMBOL(compat_sock_common_setsockopt);
2720#endif
2721
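/*
 * Sketch, for illustration only: address families commonly plug these
 * generic helpers straight into their proto_ops, forwarding socket-level
 * calls to the underlying struct proto. The initializer below is
 * hypothetical and deliberately incomplete.
 */
static const struct proto_ops example_proto_ops = {
	.family	    = PF_INET,
	.owner	    = THIS_MODULE,
	.setsockopt = sock_common_setsockopt,
	.getsockopt = sock_common_getsockopt,
	.recvmsg    = sock_common_recvmsg,
};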
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722void sk_common_release(struct sock *sk)
2723{
2724 if (sk->sk_prot->destroy)
2725 sk->sk_prot->destroy(sk);
2726
2727 /*
2728 * Observation: when sk_common_release() is called, processes have
2729 * no access to the socket, but the network stack still does.
2730 * Step one, detach it from networking:
2731 *
2732 * A. Remove it from the hash tables.
2733 */
2734
2735 sk->sk_prot->unhash(sk);
2736
2737 /*
2738 * At this point the socket cannot receive new packets, but some may
2739 * still be in flight: another CPU may have run the receiver and done
2740 * the hash table lookup before we unhashed the socket. Those packets
2741 * will reach the receive queue and be purged by the socket destructor.
2742 *
2743 * Also, we may still have packets pending on the receive queue and,
2744 * probably, our own packets waiting in device queues. The destructor
2745 * drains the receive queue, but transmitted packets delay socket
2746 * destruction until the last reference is released.
2747 */
2748
2749 sock_orphan(sk);
2750
2751 xfrm_sk_free_policy(sk);
2752
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002753 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00002754
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 sock_put(sk);
2756}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757EXPORT_SYMBOL(sk_common_release);
2758
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002759#ifdef CONFIG_PROC_FS
2760#define PROTO_INUSE_NR 64 /* should be enough for now */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002761struct prot_inuse {
2762 int val[PROTO_INUSE_NR];
2763};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002764
2765static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002766
2767#ifdef CONFIG_NET_NS
2768void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2769{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002770 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002771}
2772EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2773
2774int sock_prot_inuse_get(struct net *net, struct proto *prot)
2775{
2776 int cpu, idx = prot->inuse_idx;
2777 int res = 0;
2778
2779 for_each_possible_cpu(cpu)
2780 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2781
2782 return res >= 0 ? res : 0;
2783}
2784EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2785
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002786static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002787{
2788 net->core.inuse = alloc_percpu(struct prot_inuse);
2789 return net->core.inuse ? 0 : -ENOMEM;
2790}
2791
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002792static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002793{
2794 free_percpu(net->core.inuse);
2795}
2796
2797static struct pernet_operations net_inuse_ops = {
2798 .init = sock_inuse_init_net,
2799 .exit = sock_inuse_exit_net,
2800};
2801
2802static __init int net_inuse_init(void)
2803{
2804 if (register_pernet_subsys(&net_inuse_ops))
2805 panic("Cannot initialize net inuse counters");
2806
2807 return 0;
2808}
2809
2810core_initcall(net_inuse_init);
2811#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002812static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2813
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002814void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002815{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002816 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002817}
2818EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2819
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002820int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002821{
2822 int cpu, idx = prot->inuse_idx;
2823 int res = 0;
2824
2825 for_each_possible_cpu(cpu)
2826 res += per_cpu(prot_inuse, cpu).val[idx];
2827
2828 return res >= 0 ? res : 0;
2829}
2830EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002831#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002832
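/*
 * Sketch, for illustration only: protocols typically adjust this per-cpu
 * counter from their ->hash/->unhash callbacks. "example_unhash" is a
 * hypothetical name.
 */
static void example_unhash(struct sock *sk)
{
	/* after removing sk from the protocol's lookup structures: */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}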
2833static void assign_proto_idx(struct proto *prot)
2834{
2835 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2836
2837 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002838 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002839 return;
2840 }
2841
2842 set_bit(prot->inuse_idx, proto_inuse_idx);
2843}
2844
2845static void release_proto_idx(struct proto *prot)
2846{
2847 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2848 clear_bit(prot->inuse_idx, proto_inuse_idx);
2849}
2850#else
2851static inline void assign_proto_idx(struct proto *prot)
2852{
2853}
2854
2855static inline void release_proto_idx(struct proto *prot)
2856{
2857}
2858#endif
2859
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002860static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2861{
2862 if (!rsk_prot)
2863 return;
2864 kfree(rsk_prot->slab_name);
2865 rsk_prot->slab_name = NULL;
Julia Lawalladf78ed2015-09-13 14:15:18 +02002866 kmem_cache_destroy(rsk_prot->slab);
2867 rsk_prot->slab = NULL;
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002868}
2869
2870static int req_prot_init(const struct proto *prot)
2871{
2872 struct request_sock_ops *rsk_prot = prot->rsk_prot;
2873
2874 if (!rsk_prot)
2875 return 0;
2876
2877 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2878 prot->name);
2879 if (!rsk_prot->slab_name)
2880 return -ENOMEM;
2881
2882 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2883 rsk_prot->obj_size, 0,
Eric Dumazete96f78a2015-10-03 06:27:28 -07002884 prot->slab_flags, NULL);
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002885
2886 if (!rsk_prot->slab) {
2887 pr_crit("%s: Can't create request sock SLAB cache!\n",
2888 prot->name);
2889 return -ENOMEM;
2890 }
2891 return 0;
2892}
2893
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894int proto_register(struct proto *prot, int alloc_slab)
2895{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 if (alloc_slab) {
2897 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002898 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2899 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
2901 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002902 pr_crit("%s: Can't create sock SLAB cache!\n",
2903 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002904 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002906
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002907 if (req_prot_init(prot))
2908 goto out_free_request_sock_slab;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002909
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002910 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002911 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002912
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002913 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002914 goto out_free_request_sock_slab;
2915
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002916 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002917 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002918 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002919 0,
Eric Dumazet52db70d2015-04-10 06:07:18 -07002920 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002921 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002922 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002923 goto out_free_timewait_sock_slab_name;
2924 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 }
2926
Glauber Costa36b77a52011-12-16 00:51:59 +00002927 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002929 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002930 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002931 return 0;
2932
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002933out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002934 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002935out_free_request_sock_slab:
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002936 req_prot_cleanup(prot->rsk_prot);
2937
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002938 kmem_cache_destroy(prot->slab);
2939 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002940out:
2941 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943EXPORT_SYMBOL(proto_register);
2944
2945void proto_unregister(struct proto *prot)
2946{
Glauber Costa36b77a52011-12-16 00:51:59 +00002947 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002948 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002949 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002950 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951
Julia Lawalladf78ed2015-09-13 14:15:18 +02002952 kmem_cache_destroy(prot->slab);
2953 prot->slab = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
Eric Dumazet0159dfd2015-03-12 16:44:07 -07002955 req_prot_cleanup(prot->rsk_prot);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002956
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002957 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002958 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002959 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002960 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002961 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963EXPORT_SYMBOL(proto_unregister);
2964
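/*
 * Sketch, for illustration only: a protocol module pairs the two calls
 * above in its init/exit paths. "example_prot" is hypothetical; passing
 * alloc_slab == 1 asks proto_register() to create the sock slab cache.
 */
static struct proto example_prot = {
	.name	  = "EXAMPLE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int __init example_proto_init(void)
{
	return proto_register(&example_prot, 1);
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_prot);
}
module_init(example_proto_init);
module_exit(example_proto_exit);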
2965#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002967 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968{
Glauber Costa36b77a52011-12-16 00:51:59 +00002969 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002970 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971}
2972
2973static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2974{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002975 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976}
2977
2978static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002979 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980{
Glauber Costa36b77a52011-12-16 00:51:59 +00002981 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982}
2983
2984static char proto_method_implemented(const void *method)
2985{
2986 return method == NULL ? 'n' : 'y';
2987}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002988static long sock_prot_memory_allocated(struct proto *proto)
2989{
Jeffrin Josecb75a362012-04-25 19:17:29 +05302990 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002991}
2992
2993static char *sock_prot_memory_pressure(struct proto *proto)
2994{
2995 return proto->memory_pressure != NULL ?
2996 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2997}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998
2999static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3000{
Glauber Costa180d8cd2011-12-11 21:47:02 +00003001
Eric Dumazet8d987e52010-11-09 23:24:26 +00003002 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3004 proto->name,
3005 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08003006 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00003007 sock_prot_memory_allocated(proto),
3008 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 proto->max_header,
3010 proto->slab == NULL ? "no" : "yes",
3011 module_name(proto->owner),
3012 proto_method_implemented(proto->close),
3013 proto_method_implemented(proto->connect),
3014 proto_method_implemented(proto->disconnect),
3015 proto_method_implemented(proto->accept),
3016 proto_method_implemented(proto->ioctl),
3017 proto_method_implemented(proto->init),
3018 proto_method_implemented(proto->destroy),
3019 proto_method_implemented(proto->shutdown),
3020 proto_method_implemented(proto->setsockopt),
3021 proto_method_implemented(proto->getsockopt),
3022 proto_method_implemented(proto->sendmsg),
3023 proto_method_implemented(proto->recvmsg),
3024 proto_method_implemented(proto->sendpage),
3025 proto_method_implemented(proto->bind),
3026 proto_method_implemented(proto->backlog_rcv),
3027 proto_method_implemented(proto->hash),
3028 proto_method_implemented(proto->unhash),
3029 proto_method_implemented(proto->get_port),
3030 proto_method_implemented(proto->enter_memory_pressure));
3031}
3032
3033static int proto_seq_show(struct seq_file *seq, void *v)
3034{
Pavel Emelianov60f04382007-07-09 13:15:14 -07003035 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3037 "protocol",
3038 "size",
3039 "sockets",
3040 "memory",
3041 "press",
3042 "maxhdr",
3043 "slab",
3044 "module",
3045 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3046 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07003047 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 return 0;
3049}
3050
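/*
 * The table produced above is exposed as /proc/net/protocols via the
 * pernet registration below. A user-space sketch to dump it (includes
 * trimmed):
 *
 *	char line[256];
 *	FILE *f = fopen("/proc/net/protocols", "r");
 *
 *	if (f) {
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *	}
 */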
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003051static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 .start = proto_seq_start,
3053 .next = proto_seq_next,
3054 .stop = proto_seq_stop,
3055 .show = proto_seq_show,
3056};
3057
3058static int proto_seq_open(struct inode *inode, struct file *file)
3059{
Eric Dumazet14e943d2008-11-19 15:14:01 -08003060 return seq_open_net(inode, file, &proto_seq_ops,
3061 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062}
3063
Arjan van de Ven9a321442007-02-12 00:55:35 -08003064static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 .owner = THIS_MODULE,
3066 .open = proto_seq_open,
3067 .read = seq_read,
3068 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08003069 .release = seq_release_net,
3070};
3071
3072static __net_init int proto_init_net(struct net *net)
3073{
Gao fengd4beaa62013-02-18 01:34:54 +00003074 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
Eric Dumazet14e943d2008-11-19 15:14:01 -08003075 return -ENOMEM;
3076
3077 return 0;
3078}
3079
3080static __net_exit void proto_exit_net(struct net *net)
3081{
Gao fengece31ff2013-02-18 01:34:56 +00003082 remove_proc_entry("protocols", net->proc_net);
Eric Dumazet14e943d2008-11-19 15:14:01 -08003083}
3084
3085
3086static __net_initdata struct pernet_operations proto_net_ops = {
3087 .init = proto_init_net,
3088 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003089};
3090
3091static int __init proto_init(void)
3092{
Eric Dumazet14e943d2008-11-19 15:14:01 -08003093 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094}
3095
3096subsys_initcall(proto_init);
3097
3098#endif /* PROC_FS */