blob: 71e3e5f1eaa04816b8bdd1d34b4fd575f19793b9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
8 *
9 *
Jesper Juhl02c30a82005-05-05 16:16:16 -070010 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
14 *
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +090035 * code. The ACK stuff can wait and needs major
Linus Torvalds1da177e2005-04-16 15:20:36 -070036 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
82 *
83 * To Fix:
84 *
85 *
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
90 */
91
Joe Perchese005d192012-05-16 19:58:40 +000092#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
93
Randy Dunlap4fc268d2006-01-11 12:17:47 -080094#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#include <linux/errno.h>
Richard Cochrancb820f82013-07-19 19:40:09 +020096#include <linux/errqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070097#include <linux/types.h>
98#include <linux/socket.h>
99#include <linux/in.h>
100#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101#include <linux/module.h>
102#include <linux/proc_fs.h>
103#include <linux/seq_file.h>
104#include <linux/sched.h>
105#include <linux/timer.h>
106#include <linux/string.h>
107#include <linux/sockios.h>
108#include <linux/net.h>
109#include <linux/mm.h>
110#include <linux/slab.h>
111#include <linux/interrupt.h>
112#include <linux/poll.h>
113#include <linux/tcp.h>
114#include <linux/init.h>
Al Viroa1f8e7f72006-10-19 16:08:53 -0400115#include <linux/highmem.h>
Eric W. Biederman3f551f92010-06-13 03:28:59 +0000116#include <linux/user_namespace.h>
Ingo Molnarc5905af2012-02-24 08:31:31 +0100117#include <linux/static_key.h>
David S. Miller3969eb32012-01-09 13:44:23 -0800118#include <linux/memcontrol.h>
David S. Miller8c1ae102012-05-03 02:25:55 -0400119#include <linux/prefetch.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120
121#include <asm/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122
123#include <linux/netdevice.h>
124#include <net/protocol.h>
125#include <linux/skbuff.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +0200126#include <net/net_namespace.h>
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -0700127#include <net/request_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128#include <net/sock.h>
Patrick Ohly20d49472009-02-12 05:03:38 +0000129#include <linux/net_tstamp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130#include <net/xfrm.h>
131#include <linux/ipsec.h>
Herbert Xuf8451722010-05-24 00:12:34 -0700132#include <net/cls_cgroup.h>
Neil Horman5bc14212011-11-22 05:10:51 +0000133#include <net/netprio_cgroup.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
135#include <linux/filter.h>
136
Satoru Moriya3847ce32011-06-17 12:00:03 +0000137#include <trace/events/sock.h>
138
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139#ifdef CONFIG_INET
140#include <net/tcp.h>
141#endif
142
Eliezer Tamir076bb0c2013-07-10 17:13:17 +0300143#include <net/busy_poll.h>
Eliezer Tamir06021292013-06-10 11:39:50 +0300144
Glauber Costa36b77a52011-12-16 00:51:59 +0000145static DEFINE_MUTEX(proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000146static LIST_HEAD(proto_list);
147
Eric W. Biedermana3b299d2014-04-23 14:26:56 -0700148/**
149 * sk_ns_capable - General socket capability test
150 * @sk: Socket to use a capability on or through
151 * @user_ns: The user namespace of the capability to use
152 * @cap: The capability to use
153 *
154 * Test to see if the opener of the socket had when the socket was
155 * created and the current process has the capability @cap in the user
156 * namespace @user_ns.
157 */
158bool sk_ns_capable(const struct sock *sk,
159 struct user_namespace *user_ns, int cap)
160{
161 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
162 ns_capable(user_ns, cap);
163}
164EXPORT_SYMBOL(sk_ns_capable);
165
166/**
167 * sk_capable - Socket global capability test
168 * @sk: Socket to use a capability on or through
Masanari Iidae793c0f2014-09-04 23:44:36 +0900169 * @cap: The global capability to use
Eric W. Biedermana3b299d2014-04-23 14:26:56 -0700170 *
171 * Test to see if the opener of the socket had when the socket was
172 * created and the current process has the capability @cap in all user
173 * namespaces.
174 */
175bool sk_capable(const struct sock *sk, int cap)
176{
177 return sk_ns_capable(sk, &init_user_ns, cap);
178}
179EXPORT_SYMBOL(sk_capable);
180
181/**
182 * sk_net_capable - Network namespace socket capability test
183 * @sk: Socket to use a capability on or through
184 * @cap: The capability to use
185 *
Masanari Iidae793c0f2014-09-04 23:44:36 +0900186 * Test to see if the opener of the socket had when the socket was created
Eric W. Biedermana3b299d2014-04-23 14:26:56 -0700187 * and the current process has the capability @cap over the network namespace
188 * the socket is a member of.
189 */
190bool sk_net_capable(const struct sock *sk, int cap)
191{
192 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
193}
194EXPORT_SYMBOL(sk_net_capable);
195
196
Andrew Mortonc255a452012-07-31 16:43:02 -0700197#ifdef CONFIG_MEMCG_KMEM
Glauber Costa1d62e432012-04-09 19:36:33 -0300198int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000199{
200 struct proto *proto;
201 int ret = 0;
202
Glauber Costa36b77a52011-12-16 00:51:59 +0000203 mutex_lock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000204 list_for_each_entry(proto, &proto_list, node) {
205 if (proto->init_cgroup) {
Glauber Costa1d62e432012-04-09 19:36:33 -0300206 ret = proto->init_cgroup(memcg, ss);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000207 if (ret)
208 goto out;
209 }
210 }
211
Glauber Costa36b77a52011-12-16 00:51:59 +0000212 mutex_unlock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000213 return ret;
214out:
215 list_for_each_entry_continue_reverse(proto, &proto_list, node)
216 if (proto->destroy_cgroup)
Glauber Costa1d62e432012-04-09 19:36:33 -0300217 proto->destroy_cgroup(memcg);
Glauber Costa36b77a52011-12-16 00:51:59 +0000218 mutex_unlock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000219 return ret;
220}
221
Glauber Costa1d62e432012-04-09 19:36:33 -0300222void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000223{
224 struct proto *proto;
225
Glauber Costa36b77a52011-12-16 00:51:59 +0000226 mutex_lock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000227 list_for_each_entry_reverse(proto, &proto_list, node)
228 if (proto->destroy_cgroup)
Glauber Costa1d62e432012-04-09 19:36:33 -0300229 proto->destroy_cgroup(memcg);
Glauber Costa36b77a52011-12-16 00:51:59 +0000230 mutex_unlock(&proto_list_mutex);
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000231}
232#endif
233
Ingo Molnarda21f242006-07-03 00:25:12 -0700234/*
235 * Each address family might have different locking rules, so we have
236 * one slock key per address family:
237 */
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700238static struct lock_class_key af_family_keys[AF_MAX];
239static struct lock_class_key af_family_slock_keys[AF_MAX];
240
stephen hemmingercbda4ea2013-02-22 07:59:10 +0000241#if defined(CONFIG_MEMCG_KMEM)
Ingo Molnarc5905af2012-02-24 08:31:31 +0100242struct static_key memcg_socket_limit_enabled;
Glauber Costae1aab162011-12-11 21:47:03 +0000243EXPORT_SYMBOL(memcg_socket_limit_enabled);
stephen hemmingercbda4ea2013-02-22 07:59:10 +0000244#endif
Glauber Costae1aab162011-12-11 21:47:03 +0000245
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700246/*
247 * Make lock validator output more readable. (we pre-construct these
248 * strings build-time, so that runtime initialization of socket
249 * locks is fast):
250 */
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700251static const char *const af_family_key_strings[AF_MAX+1] = {
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700252 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
253 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
254 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
255 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
256 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
257 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
258 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800259 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700260 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800261 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700262 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700263 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800264 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000265 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700266};
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700267static const char *const af_family_slock_key_strings[AF_MAX+1] = {
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700268 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
269 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
270 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
271 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
272 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
273 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
274 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800275 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700276 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800277 "slock-27" , "slock-28" , "slock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700278 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700279 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800280 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000281 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700282};
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700283static const char *const af_family_clock_key_strings[AF_MAX+1] = {
Peter Zijlstra443aef02007-07-19 01:49:00 -0700284 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
285 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
286 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
287 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
288 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
289 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
290 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
Andy Grovercbd151b2009-02-26 23:43:19 -0800291 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
Peter Zijlstra443aef02007-07-19 01:49:00 -0700292 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
Oliver Hartkoppb4942af2008-07-23 14:06:04 -0700293 "clock-27" , "clock-28" , "clock-AF_CAN" ,
David Howellse51f8022007-07-21 19:30:16 -0700294 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700295 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
Miloslav Trmač6f107b52010-12-08 14:35:34 +0800296 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
Federico Vaga456db6a2013-05-28 05:02:44 +0000297 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
Peter Zijlstra443aef02007-07-19 01:49:00 -0700298};
Ingo Molnarda21f242006-07-03 00:25:12 -0700299
300/*
301 * sk_callback_lock locking rules are per-address-family,
302 * so split the lock classes by using a per-AF key:
303 */
304static struct lock_class_key af_callback_keys[AF_MAX];
305
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306/* Take into consideration the size of the struct sk_buff overhead in the
307 * determination of these values, since that is non-constant across
308 * platforms. This makes socket queueing behavior and performance
309 * not depend upon such differences.
310 */
311#define _SK_MEM_PACKETS 256
Eric Dumazet87fb4b72011-10-13 07:28:54 +0000312#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
314#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
315
316/* Run time adjustable parameters. */
Brian Haleyab32ea52006-09-22 14:15:41 -0700317__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
Hans Schillstrom6d8ebc82012-04-30 08:13:50 +0200318EXPORT_SYMBOL(sysctl_wmem_max);
Brian Haleyab32ea52006-09-22 14:15:41 -0700319__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
Hans Schillstrom6d8ebc82012-04-30 08:13:50 +0200320EXPORT_SYMBOL(sysctl_rmem_max);
Brian Haleyab32ea52006-09-22 14:15:41 -0700321__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
322__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300324/* Maximal space eaten by iovec or ancillary data plus some space */
Brian Haleyab32ea52006-09-22 14:15:41 -0700325int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
Eric Dumazet2a915252009-05-27 11:30:05 +0000326EXPORT_SYMBOL(sysctl_optmem_max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327
Willem de Bruijnb245be12015-01-30 13:29:32 -0500328int sysctl_tstamp_allow_data __read_mostly = 1;
329
Mel Gormanc93bdd02012-07-31 16:44:19 -0700330struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
331EXPORT_SYMBOL_GPL(memalloc_socks);
332
Mel Gorman7cb02402012-07-31 16:44:16 -0700333/**
334 * sk_set_memalloc - sets %SOCK_MEMALLOC
335 * @sk: socket to set it on
336 *
337 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
338 * It's the responsibility of the admin to adjust min_free_kbytes
339 * to meet the requirements
340 */
341void sk_set_memalloc(struct sock *sk)
342{
343 sock_set_flag(sk, SOCK_MEMALLOC);
344 sk->sk_allocation |= __GFP_MEMALLOC;
Mel Gormanc93bdd02012-07-31 16:44:19 -0700345 static_key_slow_inc(&memalloc_socks);
Mel Gorman7cb02402012-07-31 16:44:16 -0700346}
347EXPORT_SYMBOL_GPL(sk_set_memalloc);
348
349void sk_clear_memalloc(struct sock *sk)
350{
351 sock_reset_flag(sk, SOCK_MEMALLOC);
352 sk->sk_allocation &= ~__GFP_MEMALLOC;
Mel Gormanc93bdd02012-07-31 16:44:19 -0700353 static_key_slow_dec(&memalloc_socks);
Mel Gormanc76562b2012-07-31 16:44:41 -0700354
355 /*
356 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
357 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
358 * it has rmem allocations there is a risk that the user of the
359 * socket cannot make forward progress due to exceeding the rmem
360 * limits. By rights, sk_clear_memalloc() should only be called
361 * on sockets being torn down but warn and reset the accounting if
362 * that assumption breaks.
363 */
364 if (WARN_ON(sk->sk_forward_alloc))
365 sk_mem_reclaim(sk);
Mel Gorman7cb02402012-07-31 16:44:16 -0700366}
367EXPORT_SYMBOL_GPL(sk_clear_memalloc);
368
Mel Gormanb4b9e352012-07-31 16:44:26 -0700369int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
370{
371 int ret;
372 unsigned long pflags = current->flags;
373
374 /* these should have been dropped before queueing */
375 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
376
377 current->flags |= PF_MEMALLOC;
378 ret = sk->sk_backlog_rcv(sk, skb);
379 tsk_restore_flags(current, pflags, PF_MEMALLOC);
380
381 return ret;
382}
383EXPORT_SYMBOL(__sk_backlog_rcv);
384
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
386{
387 struct timeval tv;
388
389 if (optlen < sizeof(tv))
390 return -EINVAL;
391 if (copy_from_user(&tv, optval, sizeof(tv)))
392 return -EFAULT;
Vasily Averinba780732007-05-24 16:58:54 -0700393 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
394 return -EDOM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395
Vasily Averinba780732007-05-24 16:58:54 -0700396 if (tv.tv_sec < 0) {
Andrew Morton6f11df82007-07-09 13:16:00 -0700397 static int warned __read_mostly;
398
Vasily Averinba780732007-05-24 16:58:54 -0700399 *timeo_p = 0;
Ilpo Järvinen50aab542008-05-02 16:20:10 -0700400 if (warned < 10 && net_ratelimit()) {
Vasily Averinba780732007-05-24 16:58:54 -0700401 warned++;
Joe Perchese005d192012-05-16 19:58:40 +0000402 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
403 __func__, current->comm, task_pid_nr(current));
Ilpo Järvinen50aab542008-05-02 16:20:10 -0700404 }
Vasily Averinba780732007-05-24 16:58:54 -0700405 return 0;
406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 *timeo_p = MAX_SCHEDULE_TIMEOUT;
408 if (tv.tv_sec == 0 && tv.tv_usec == 0)
409 return 0;
410 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
411 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
412 return 0;
413}
414
415static void sock_warn_obsolete_bsdism(const char *name)
416{
417 static int warned;
418 static char warncomm[TASK_COMM_LEN];
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900419 if (strcmp(warncomm, current->comm) && warned < 5) {
420 strcpy(warncomm, current->comm);
Joe Perchese005d192012-05-16 19:58:40 +0000421 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
422 warncomm, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 warned++;
424 }
425}
426
Eric Dumazet08e29af2011-11-28 12:04:18 +0000427#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
428
429static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900430{
Eric Dumazet08e29af2011-11-28 12:04:18 +0000431 if (sk->sk_flags & flags) {
432 sk->sk_flags &= ~flags;
433 if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +0000434 net_disable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435 }
436}
437
438
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800439int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
440{
Eric Dumazet766e90372009-10-14 20:40:11 -0700441 int err;
Neil Horman3b885782009-10-12 13:26:31 -0700442 unsigned long flags;
443 struct sk_buff_head *list = &sk->sk_receive_queue;
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800444
Eric Dumazet0fd7bac2011-12-21 07:11:44 +0000445 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
Eric Dumazet766e90372009-10-14 20:40:11 -0700446 atomic_inc(&sk->sk_drops);
Satoru Moriya3847ce32011-06-17 12:00:03 +0000447 trace_sock_rcvqueue_full(sk, skb);
Eric Dumazet766e90372009-10-14 20:40:11 -0700448 return -ENOMEM;
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800449 }
450
Dmitry Mishinfda9ef52006-08-31 15:28:39 -0700451 err = sk_filter(sk, skb);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800452 if (err)
Eric Dumazet766e90372009-10-14 20:40:11 -0700453 return err;
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800454
Mel Gormanc76562b2012-07-31 16:44:41 -0700455 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
Eric Dumazet766e90372009-10-14 20:40:11 -0700456 atomic_inc(&sk->sk_drops);
457 return -ENOBUFS;
Hideo Aoki3ab224b2007-12-31 00:11:19 -0800458 }
459
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800460 skb->dev = NULL;
461 skb_set_owner_r(skb, sk);
David S. Miller49ad9592008-12-17 22:11:38 -0800462
Eric Dumazet7fee2262010-05-11 23:19:48 +0000463 /* we escape from rcu protected region, make sure we dont leak
464 * a norefcounted dst
465 */
466 skb_dst_force(skb);
467
Neil Horman3b885782009-10-12 13:26:31 -0700468 spin_lock_irqsave(&list->lock, flags);
469 skb->dropcount = atomic_read(&sk->sk_drops);
470 __skb_queue_tail(list, skb);
471 spin_unlock_irqrestore(&list->lock, flags);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800472
473 if (!sock_flag(sk, SOCK_DEAD))
David S. Miller676d2362014-04-11 16:15:36 -0400474 sk->sk_data_ready(sk);
Eric Dumazet766e90372009-10-14 20:40:11 -0700475 return 0;
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800476}
477EXPORT_SYMBOL(sock_queue_rcv_skb);
478
Arnaldo Carvalho de Melo58a5a7b2006-11-16 14:06:06 -0200479int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800480{
481 int rc = NET_RX_SUCCESS;
482
Dmitry Mishinfda9ef52006-08-31 15:28:39 -0700483 if (sk_filter(sk, skb))
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800484 goto discard_and_relse;
485
486 skb->dev = NULL;
487
Sorin Dumitru274f4822014-07-22 21:16:51 +0300488 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
Eric Dumazetc3774112010-04-27 15:13:20 -0700489 atomic_inc(&sk->sk_drops);
490 goto discard_and_relse;
491 }
Arnaldo Carvalho de Melo58a5a7b2006-11-16 14:06:06 -0200492 if (nested)
493 bh_lock_sock_nested(sk);
494 else
495 bh_lock_sock(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700496 if (!sock_owned_by_user(sk)) {
497 /*
498 * trylock + unlock semantics:
499 */
500 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
501
Peter Zijlstrac57943a2008-10-07 14:18:42 -0700502 rc = sk_backlog_rcv(sk, skb);
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700503
504 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
Eric Dumazetf545a382012-04-22 23:34:26 +0000505 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
Zhu Yi8eae9392010-03-04 18:01:40 +0000506 bh_unlock_sock(sk);
507 atomic_inc(&sk->sk_drops);
508 goto discard_and_relse;
509 }
510
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800511 bh_unlock_sock(sk);
512out:
513 sock_put(sk);
514 return rc;
515discard_and_relse:
516 kfree_skb(skb);
517 goto out;
518}
519EXPORT_SYMBOL(sk_receive_skb);
520
521struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
522{
Eric Dumazetb6c67122010-04-08 23:03:29 +0000523 struct dst_entry *dst = __sk_dst_get(sk);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800524
525 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
Krishna Kumare022f0b2009-10-19 23:46:20 +0000526 sk_tx_queue_clear(sk);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +0000527 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
Denis Vlasenkof0088a52006-03-28 01:08:21 -0800528 dst_release(dst);
529 return NULL;
530 }
531
532 return dst;
533}
534EXPORT_SYMBOL(__sk_dst_check);
535
536struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
537{
538 struct dst_entry *dst = sk_dst_get(sk);
539
540 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
541 sk_dst_reset(sk);
542 dst_release(dst);
543 return NULL;
544 }
545
546 return dst;
547}
548EXPORT_SYMBOL(sk_dst_check);
549
Brian Haleyc91f6df2012-11-26 05:21:08 +0000550static int sock_setbindtodevice(struct sock *sk, char __user *optval,
551 int optlen)
David S. Miller48788092007-09-14 16:41:03 -0700552{
553 int ret = -ENOPROTOOPT;
554#ifdef CONFIG_NETDEVICES
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +0900555 struct net *net = sock_net(sk);
David S. Miller48788092007-09-14 16:41:03 -0700556 char devname[IFNAMSIZ];
557 int index;
558
559 /* Sorry... */
560 ret = -EPERM;
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000561 if (!ns_capable(net->user_ns, CAP_NET_RAW))
David S. Miller48788092007-09-14 16:41:03 -0700562 goto out;
563
564 ret = -EINVAL;
565 if (optlen < 0)
566 goto out;
567
568 /* Bind this socket to a particular device like "eth0",
569 * as specified in the passed interface name. If the
570 * name is "" or the option length is zero the socket
571 * is not bound.
572 */
573 if (optlen > IFNAMSIZ - 1)
574 optlen = IFNAMSIZ - 1;
575 memset(devname, 0, sizeof(devname));
576
577 ret = -EFAULT;
578 if (copy_from_user(devname, optval, optlen))
579 goto out;
580
David S. Miller000ba2e2009-11-05 22:37:11 -0800581 index = 0;
582 if (devname[0] != '\0') {
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800583 struct net_device *dev;
David S. Miller48788092007-09-14 16:41:03 -0700584
Eric Dumazetbf8e56b2009-11-05 21:03:39 -0800585 rcu_read_lock();
586 dev = dev_get_by_name_rcu(net, devname);
587 if (dev)
588 index = dev->ifindex;
589 rcu_read_unlock();
David S. Miller48788092007-09-14 16:41:03 -0700590 ret = -ENODEV;
591 if (!dev)
592 goto out;
David S. Miller48788092007-09-14 16:41:03 -0700593 }
594
595 lock_sock(sk);
596 sk->sk_bound_dev_if = index;
597 sk_dst_reset(sk);
598 release_sock(sk);
599
600 ret = 0;
601
602out:
603#endif
604
605 return ret;
606}
607
Brian Haleyc91f6df2012-11-26 05:21:08 +0000608static int sock_getbindtodevice(struct sock *sk, char __user *optval,
609 int __user *optlen, int len)
610{
611 int ret = -ENOPROTOOPT;
612#ifdef CONFIG_NETDEVICES
613 struct net *net = sock_net(sk);
Brian Haleyc91f6df2012-11-26 05:21:08 +0000614 char devname[IFNAMSIZ];
Brian Haleyc91f6df2012-11-26 05:21:08 +0000615
616 if (sk->sk_bound_dev_if == 0) {
617 len = 0;
618 goto zero;
619 }
620
621 ret = -EINVAL;
622 if (len < IFNAMSIZ)
623 goto out;
624
Nicolas Schichan5dbe7c12013-06-26 17:23:42 +0200625 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
626 if (ret)
Brian Haleyc91f6df2012-11-26 05:21:08 +0000627 goto out;
Brian Haleyc91f6df2012-11-26 05:21:08 +0000628
629 len = strlen(devname) + 1;
630
631 ret = -EFAULT;
632 if (copy_to_user(optval, devname, len))
633 goto out;
634
635zero:
636 ret = -EFAULT;
637 if (put_user(len, optlen))
638 goto out;
639
640 ret = 0;
641
642out:
643#endif
644
645 return ret;
646}
647
Pavel Emelyanovc0ef8772007-11-15 03:03:19 -0800648static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
649{
650 if (valbool)
651 sock_set_flag(sk, bit);
652 else
653 sock_reset_flag(sk, bit);
654}
655
hannes@stressinduktion.orgf60e5992015-04-01 17:07:44 +0200656bool sk_mc_loop(struct sock *sk)
657{
658 if (dev_recursion_level())
659 return false;
660 if (!sk)
661 return true;
662 switch (sk->sk_family) {
663 case AF_INET:
664 return inet_sk(sk)->mc_loop;
665#if IS_ENABLED(CONFIG_IPV6)
666 case AF_INET6:
667 return inet6_sk(sk)->mc_loop;
668#endif
669 }
670 WARN_ON(1);
671 return true;
672}
673EXPORT_SYMBOL(sk_mc_loop);
674
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675/*
676 * This is meant for all protocols to use and covers goings on
677 * at the socket level. Everything here is generic.
678 */
679
680int sock_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -0700681 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682{
Eric Dumazet2a915252009-05-27 11:30:05 +0000683 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684 int val;
685 int valbool;
686 struct linger ling;
687 int ret = 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900688
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 /*
690 * Options without arguments
691 */
692
David S. Miller48788092007-09-14 16:41:03 -0700693 if (optname == SO_BINDTODEVICE)
Brian Haleyc91f6df2012-11-26 05:21:08 +0000694 return sock_setbindtodevice(sk, optval, optlen);
David S. Miller48788092007-09-14 16:41:03 -0700695
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700696 if (optlen < sizeof(int))
697 return -EINVAL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900698
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699 if (get_user(val, (int __user *)optval))
700 return -EFAULT;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900701
Eric Dumazet2a915252009-05-27 11:30:05 +0000702 valbool = val ? 1 : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703
704 lock_sock(sk);
705
Eric Dumazet2a915252009-05-27 11:30:05 +0000706 switch (optname) {
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700707 case SO_DEBUG:
Eric Dumazet2a915252009-05-27 11:30:05 +0000708 if (val && !capable(CAP_NET_ADMIN))
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700709 ret = -EACCES;
Eric Dumazet2a915252009-05-27 11:30:05 +0000710 else
Pavel Emelyanovc0ef8772007-11-15 03:03:19 -0800711 sock_valbool_flag(sk, SOCK_DBG, valbool);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700712 break;
713 case SO_REUSEADDR:
Pavel Emelyanov4a17fd52012-04-19 03:39:36 +0000714 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700715 break;
Tom Herbert055dc212013-01-22 09:49:50 +0000716 case SO_REUSEPORT:
717 sk->sk_reuseport = valbool;
718 break;
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700719 case SO_TYPE:
Jan Engelhardt49c794e2009-08-04 07:28:28 +0000720 case SO_PROTOCOL:
Jan Engelhardt0d6038e2009-08-04 07:28:29 +0000721 case SO_DOMAIN:
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700722 case SO_ERROR:
723 ret = -ENOPROTOOPT;
724 break;
725 case SO_DONTROUTE:
Pavel Emelyanovc0ef8772007-11-15 03:03:19 -0800726 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700727 break;
728 case SO_BROADCAST:
729 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
730 break;
731 case SO_SNDBUF:
732 /* Don't error on this BSD doesn't and if you think
Eric Dumazet82981932012-04-26 20:07:59 +0000733 * about it this is right. Otherwise apps have to
734 * play 'guess the biggest size' games. RCVBUF/SNDBUF
735 * are treated in BSD as hints
736 */
737 val = min_t(u32, val, sysctl_wmem_max);
Patrick McHardyb0573de2005-08-09 19:30:51 -0700738set_sndbuf:
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700739 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
Eric Dumazet82981932012-04-26 20:07:59 +0000740 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
741 /* Wake up sending tasks if we upped the value. */
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700742 sk->sk_write_space(sk);
743 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700745 case SO_SNDBUFFORCE:
746 if (!capable(CAP_NET_ADMIN)) {
747 ret = -EPERM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700748 break;
749 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700750 goto set_sndbuf;
751
752 case SO_RCVBUF:
753 /* Don't error on this BSD doesn't and if you think
Eric Dumazet82981932012-04-26 20:07:59 +0000754 * about it this is right. Otherwise apps have to
755 * play 'guess the biggest size' games. RCVBUF/SNDBUF
756 * are treated in BSD as hints
757 */
758 val = min_t(u32, val, sysctl_rmem_max);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700759set_rcvbuf:
760 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
761 /*
762 * We double it on the way in to account for
763 * "struct sk_buff" etc. overhead. Applications
764 * assume that the SO_RCVBUF setting they make will
765 * allow that much actual data to be received on that
766 * socket.
767 *
768 * Applications are unaware that "struct sk_buff" and
769 * other overheads allocate from the receive buffer
770 * during socket buffer allocation.
771 *
772 * And after considering the possible alternatives,
773 * returning the value we actually used in getsockopt
774 * is the most desirable behavior.
775 */
Eric Dumazet82981932012-04-26 20:07:59 +0000776 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700777 break;
778
779 case SO_RCVBUFFORCE:
780 if (!capable(CAP_NET_ADMIN)) {
781 ret = -EPERM;
782 break;
783 }
784 goto set_rcvbuf;
785
786 case SO_KEEPALIVE:
787#ifdef CONFIG_INET
Eric Dumazet3e109862012-09-24 07:00:11 +0000788 if (sk->sk_protocol == IPPROTO_TCP &&
789 sk->sk_type == SOCK_STREAM)
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700790 tcp_set_keepalive(sk, valbool);
791#endif
792 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
793 break;
794
795 case SO_OOBINLINE:
796 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
797 break;
798
799 case SO_NO_CHECK:
Tom Herbert28448b82014-05-23 08:47:19 -0700800 sk->sk_no_check_tx = valbool;
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700801 break;
802
803 case SO_PRIORITY:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000804 if ((val >= 0 && val <= 6) ||
805 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700806 sk->sk_priority = val;
807 else
808 ret = -EPERM;
809 break;
810
811 case SO_LINGER:
812 if (optlen < sizeof(ling)) {
813 ret = -EINVAL; /* 1003.1g */
814 break;
815 }
Eric Dumazet2a915252009-05-27 11:30:05 +0000816 if (copy_from_user(&ling, optval, sizeof(ling))) {
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700817 ret = -EFAULT;
818 break;
819 }
820 if (!ling.l_onoff)
821 sock_reset_flag(sk, SOCK_LINGER);
822 else {
823#if (BITS_PER_LONG == 32)
824 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
825 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
826 else
827#endif
828 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
829 sock_set_flag(sk, SOCK_LINGER);
830 }
831 break;
832
833 case SO_BSDCOMPAT:
834 sock_warn_obsolete_bsdism("setsockopt");
835 break;
836
837 case SO_PASSCRED:
838 if (valbool)
839 set_bit(SOCK_PASSCRED, &sock->flags);
840 else
841 clear_bit(SOCK_PASSCRED, &sock->flags);
842 break;
843
844 case SO_TIMESTAMP:
Eric Dumazet92f37fd2007-03-25 22:14:49 -0700845 case SO_TIMESTAMPNS:
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700846 if (valbool) {
Eric Dumazet92f37fd2007-03-25 22:14:49 -0700847 if (optname == SO_TIMESTAMP)
848 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
849 else
850 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700851 sock_set_flag(sk, SOCK_RCVTSTAMP);
Patrick Ohly20d49472009-02-12 05:03:38 +0000852 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazet92f37fd2007-03-25 22:14:49 -0700853 } else {
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700854 sock_reset_flag(sk, SOCK_RCVTSTAMP);
Eric Dumazet92f37fd2007-03-25 22:14:49 -0700855 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
856 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700857 break;
858
Patrick Ohly20d49472009-02-12 05:03:38 +0000859 case SO_TIMESTAMPING:
860 if (val & ~SOF_TIMESTAMPING_MASK) {
Rémi Denis-Courmontf249fb72009-07-20 00:47:04 +0000861 ret = -EINVAL;
Patrick Ohly20d49472009-02-12 05:03:38 +0000862 break;
863 }
Willem de Bruijnb245be12015-01-30 13:29:32 -0500864
Willem de Bruijn09c2d252014-08-04 22:11:47 -0400865 if (val & SOF_TIMESTAMPING_OPT_ID &&
Willem de Bruijn4ed2d762014-08-04 22:11:49 -0400866 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
867 if (sk->sk_protocol == IPPROTO_TCP) {
868 if (sk->sk_state != TCP_ESTABLISHED) {
869 ret = -EINVAL;
870 break;
871 }
872 sk->sk_tskey = tcp_sk(sk)->snd_una;
873 } else {
874 sk->sk_tskey = 0;
875 }
876 }
Willem de Bruijnb9f40e22014-08-04 22:11:46 -0400877 sk->sk_tsflags = val;
Patrick Ohly20d49472009-02-12 05:03:38 +0000878 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
879 sock_enable_timestamp(sk,
880 SOCK_TIMESTAMPING_RX_SOFTWARE);
881 else
882 sock_disable_timestamp(sk,
Eric Dumazet08e29af2011-11-28 12:04:18 +0000883 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
Patrick Ohly20d49472009-02-12 05:03:38 +0000884 break;
885
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700886 case SO_RCVLOWAT:
887 if (val < 0)
888 val = INT_MAX;
889 sk->sk_rcvlowat = val ? : 1;
890 break;
891
892 case SO_RCVTIMEO:
893 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
894 break;
895
896 case SO_SNDTIMEO:
897 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
898 break;
899
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700900 case SO_ATTACH_FILTER:
901 ret = -EINVAL;
902 if (optlen == sizeof(struct sock_fprog)) {
903 struct sock_fprog fprog;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700905 ret = -EFAULT;
906 if (copy_from_user(&fprog, optval, sizeof(fprog)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700909 ret = sk_attach_filter(&fprog, sk);
910 }
911 break;
912
Alexei Starovoitov89aa0752014-12-01 15:06:35 -0800913 case SO_ATTACH_BPF:
914 ret = -EINVAL;
915 if (optlen == sizeof(u32)) {
916 u32 ufd;
917
918 ret = -EFAULT;
919 if (copy_from_user(&ufd, optval, sizeof(ufd)))
920 break;
921
922 ret = sk_attach_bpf(ufd, sk);
923 }
924 break;
925
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700926 case SO_DETACH_FILTER:
Pavel Emelyanov55b33322007-10-17 21:21:26 -0700927 ret = sk_detach_filter(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700928 break;
929
Vincent Bernatd59577b2013-01-16 22:55:49 +0100930 case SO_LOCK_FILTER:
931 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
932 ret = -EPERM;
933 else
934 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
935 break;
936
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700937 case SO_PASSSEC:
938 if (valbool)
939 set_bit(SOCK_PASSSEC, &sock->flags);
940 else
941 clear_bit(SOCK_PASSSEC, &sock->flags);
942 break;
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -0800943 case SO_MARK:
Eric W. Biederman5e1fccc2012-11-16 03:03:04 +0000944 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -0800945 ret = -EPERM;
Eric Dumazet2a915252009-05-27 11:30:05 +0000946 else
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -0800947 sk->sk_mark = val;
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -0800948 break;
Catherine Zhang877ce7c2006-06-29 12:27:47 -0700949
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 /* We implement the SO_SNDLOWAT etc to
951 not be settable (1003.1g 5.3) */
Neil Horman3b885782009-10-12 13:26:31 -0700952 case SO_RXQ_OVFL:
Johannes Berg8083f0f2011-10-07 03:30:20 +0000953 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
Neil Horman3b885782009-10-12 13:26:31 -0700954 break;
Johannes Berg6e3e9392011-11-09 10:15:42 +0100955
956 case SO_WIFI_STATUS:
957 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
958 break;
959
Pavel Emelyanovef64a542012-02-21 07:31:34 +0000960 case SO_PEEK_OFF:
961 if (sock->ops->set_peek_off)
Sasha Levin12663bf2013-12-07 17:26:27 -0500962 ret = sock->ops->set_peek_off(sk, val);
Pavel Emelyanovef64a542012-02-21 07:31:34 +0000963 else
964 ret = -EOPNOTSUPP;
965 break;
Ben Greear3bdc0eb2012-02-11 15:39:30 +0000966
967 case SO_NOFCS:
968 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
969 break;
970
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +0000971 case SO_SELECT_ERR_QUEUE:
972 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
973 break;
974
Cong Wange0d10952013-08-01 11:10:25 +0800975#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir64b0dc52013-07-10 17:13:36 +0300976 case SO_BUSY_POLL:
Eliezer Tamirdafcc432013-06-14 16:33:57 +0300977 /* allow unprivileged users to decrease the value */
978 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
979 ret = -EPERM;
980 else {
981 if (val < 0)
982 ret = -EINVAL;
983 else
984 sk->sk_ll_usec = val;
985 }
986 break;
987#endif
Eric Dumazet62748f32013-09-24 08:20:52 -0700988
989 case SO_MAX_PACING_RATE:
990 sk->sk_max_pacing_rate = val;
991 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
992 sk->sk_max_pacing_rate);
993 break;
994
Stephen Hemmingere71a4782007-04-10 20:10:33 -0700995 default:
996 ret = -ENOPROTOOPT;
997 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 release_sock(sk);
1000 return ret;
1001}
Eric Dumazet2a915252009-05-27 11:30:05 +00001002EXPORT_SYMBOL(sock_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003
1004
stephen hemminger8f098982014-01-03 09:17:14 -08001005static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1006 struct ucred *ucred)
Eric W. Biederman3f551f92010-06-13 03:28:59 +00001007{
1008 ucred->pid = pid_vnr(pid);
1009 ucred->uid = ucred->gid = -1;
1010 if (cred) {
1011 struct user_namespace *current_ns = current_user_ns();
1012
Eric W. Biedermanb2e4f542012-05-23 16:39:45 -06001013 ucred->uid = from_kuid_munged(current_ns, cred->euid);
1014 ucred->gid = from_kgid_munged(current_ns, cred->egid);
Eric W. Biederman3f551f92010-06-13 03:28:59 +00001015 }
1016}
1017
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018int sock_getsockopt(struct socket *sock, int level, int optname,
1019 char __user *optval, int __user *optlen)
1020{
1021 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001022
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001023 union {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001024 int val;
1025 struct linger ling;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026 struct timeval tm;
1027 } v;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001028
H Hartley Sweeten4d0392b2010-01-15 01:08:58 -08001029 int lv = sizeof(int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030 int len;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001031
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001032 if (get_user(len, optlen))
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001033 return -EFAULT;
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001034 if (len < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035 return -EINVAL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001036
Eugene Teo50fee1d2009-02-23 15:38:41 -08001037 memset(&v, 0, sizeof(v));
Clément Lecignedf0bca02009-02-12 16:59:09 -08001038
Eric Dumazet2a915252009-05-27 11:30:05 +00001039 switch (optname) {
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001040 case SO_DEBUG:
1041 v.val = sock_flag(sk, SOCK_DBG);
1042 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001043
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001044 case SO_DONTROUTE:
1045 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1046 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001047
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001048 case SO_BROADCAST:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001049 v.val = sock_flag(sk, SOCK_BROADCAST);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001050 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001052 case SO_SNDBUF:
1053 v.val = sk->sk_sndbuf;
1054 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001055
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001056 case SO_RCVBUF:
1057 v.val = sk->sk_rcvbuf;
1058 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001060 case SO_REUSEADDR:
1061 v.val = sk->sk_reuse;
1062 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063
Tom Herbert055dc212013-01-22 09:49:50 +00001064 case SO_REUSEPORT:
1065 v.val = sk->sk_reuseport;
1066 break;
1067
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001068 case SO_KEEPALIVE:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001069 v.val = sock_flag(sk, SOCK_KEEPOPEN);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001070 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001072 case SO_TYPE:
1073 v.val = sk->sk_type;
1074 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075
Jan Engelhardt49c794e2009-08-04 07:28:28 +00001076 case SO_PROTOCOL:
1077 v.val = sk->sk_protocol;
1078 break;
1079
Jan Engelhardt0d6038e2009-08-04 07:28:29 +00001080 case SO_DOMAIN:
1081 v.val = sk->sk_family;
1082 break;
1083
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001084 case SO_ERROR:
1085 v.val = -sock_error(sk);
Eric Dumazet2a915252009-05-27 11:30:05 +00001086 if (v.val == 0)
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001087 v.val = xchg(&sk->sk_err_soft, 0);
1088 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001090 case SO_OOBINLINE:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001091 v.val = sock_flag(sk, SOCK_URGINLINE);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001092 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001093
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001094 case SO_NO_CHECK:
Tom Herbert28448b82014-05-23 08:47:19 -07001095 v.val = sk->sk_no_check_tx;
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001096 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001098 case SO_PRIORITY:
1099 v.val = sk->sk_priority;
1100 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001101
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001102 case SO_LINGER:
1103 lv = sizeof(v.ling);
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001104 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001105 v.ling.l_linger = sk->sk_lingertime / HZ;
1106 break;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001107
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001108 case SO_BSDCOMPAT:
1109 sock_warn_obsolete_bsdism("getsockopt");
1110 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001112 case SO_TIMESTAMP:
Eric Dumazet92f37fd2007-03-25 22:14:49 -07001113 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1114 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1115 break;
1116
1117 case SO_TIMESTAMPNS:
1118 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001119 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120
Patrick Ohly20d49472009-02-12 05:03:38 +00001121 case SO_TIMESTAMPING:
Willem de Bruijnb9f40e22014-08-04 22:11:46 -04001122 v.val = sk->sk_tsflags;
Patrick Ohly20d49472009-02-12 05:03:38 +00001123 break;
1124
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001125 case SO_RCVTIMEO:
Eric Dumazet2a915252009-05-27 11:30:05 +00001126 lv = sizeof(struct timeval);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001127 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1128 v.tm.tv_sec = 0;
1129 v.tm.tv_usec = 0;
1130 } else {
1131 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1132 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001134 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001136 case SO_SNDTIMEO:
Eric Dumazet2a915252009-05-27 11:30:05 +00001137 lv = sizeof(struct timeval);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001138 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1139 v.tm.tv_sec = 0;
1140 v.tm.tv_usec = 0;
1141 } else {
1142 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1143 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1144 }
1145 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001147 case SO_RCVLOWAT:
1148 v.val = sk->sk_rcvlowat;
1149 break;
Catherine Zhang877ce7c2006-06-29 12:27:47 -07001150
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001151 case SO_SNDLOWAT:
Eric Dumazet2a915252009-05-27 11:30:05 +00001152 v.val = 1;
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001153 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001155 case SO_PASSCRED:
Eric Dumazet82981932012-04-26 20:07:59 +00001156 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001157 break;
1158
1159 case SO_PEERCRED:
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001160 {
1161 struct ucred peercred;
1162 if (len > sizeof(peercred))
1163 len = sizeof(peercred);
1164 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1165 if (copy_to_user(optval, &peercred, len))
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001166 return -EFAULT;
1167 goto lenout;
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001168 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001169
1170 case SO_PEERNAME:
1171 {
1172 char address[128];
1173
1174 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1175 return -ENOTCONN;
1176 if (lv < len)
1177 return -EINVAL;
1178 if (copy_to_user(optval, address, len))
1179 return -EFAULT;
1180 goto lenout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 }
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001182
1183 /* Dubious BSD thing... Probably nobody even uses it, but
1184 * the UNIX standard wants it for whatever reason... -DaveM
1185 */
1186 case SO_ACCEPTCONN:
1187 v.val = sk->sk_state == TCP_LISTEN;
1188 break;
1189
1190 case SO_PASSSEC:
Eric Dumazet82981932012-04-26 20:07:59 +00001191 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001192 break;
1193
1194 case SO_PEERSEC:
1195 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1196
Laszlo Attila Toth4a19ec52008-01-30 19:08:16 -08001197 case SO_MARK:
1198 v.val = sk->sk_mark;
1199 break;
1200
Neil Horman3b885782009-10-12 13:26:31 -07001201 case SO_RXQ_OVFL:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001202 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
Neil Horman3b885782009-10-12 13:26:31 -07001203 break;
1204
Johannes Berg6e3e9392011-11-09 10:15:42 +01001205 case SO_WIFI_STATUS:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001206 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
Johannes Berg6e3e9392011-11-09 10:15:42 +01001207 break;
1208
Pavel Emelyanovef64a542012-02-21 07:31:34 +00001209 case SO_PEEK_OFF:
1210 if (!sock->ops->set_peek_off)
1211 return -EOPNOTSUPP;
1212
1213 v.val = sk->sk_peek_off;
1214 break;
David S. Millerbc2f7992012-02-24 14:48:34 -05001215 case SO_NOFCS:
Eric Dumazet1b23a5d2012-05-16 05:57:07 +00001216 v.val = sock_flag(sk, SOCK_NOFCS);
David S. Millerbc2f7992012-02-24 14:48:34 -05001217 break;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001218
Pavel Emelyanovf7b86bf2012-10-18 23:55:56 +00001219 case SO_BINDTODEVICE:
Brian Haleyc91f6df2012-11-26 05:21:08 +00001220 return sock_getbindtodevice(sk, optval, optlen, len);
1221
Pavel Emelyanova8fc9272012-11-01 02:01:48 +00001222 case SO_GET_FILTER:
1223 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1224 if (len < 0)
1225 return len;
1226
1227 goto lenout;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001228
Vincent Bernatd59577b2013-01-16 22:55:49 +01001229 case SO_LOCK_FILTER:
1230 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1231 break;
1232
Michal Sekletarea02f942014-01-17 17:09:45 +01001233 case SO_BPF_EXTENSIONS:
1234 v.val = bpf_tell_extensions();
1235 break;
1236
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00001237 case SO_SELECT_ERR_QUEUE:
1238 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1239 break;
1240
Cong Wange0d10952013-08-01 11:10:25 +08001241#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03001242 case SO_BUSY_POLL:
Eliezer Tamirdafcc432013-06-14 16:33:57 +03001243 v.val = sk->sk_ll_usec;
1244 break;
1245#endif
1246
Eric Dumazet62748f32013-09-24 08:20:52 -07001247 case SO_MAX_PACING_RATE:
1248 v.val = sk->sk_max_pacing_rate;
1249 break;
1250
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001251 case SO_INCOMING_CPU:
1252 v.val = sk->sk_incoming_cpu;
1253 break;
1254
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001255 default:
1256 return -ENOPROTOOPT;
1257 }
1258
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 if (len > lv)
1260 len = lv;
1261 if (copy_to_user(optval, &v, len))
1262 return -EFAULT;
1263lenout:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001264 if (put_user(len, optlen))
1265 return -EFAULT;
1266 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267}
1268
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001269/*
1270 * Initialize an sk_lock.
1271 *
1272 * (We also register the sk_lock with the lock validator.)
1273 */
Dave Jonesb6f99a22007-03-22 12:27:49 -07001274static inline void sock_lock_init(struct sock *sk)
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001275{
Peter Zijlstraed075362006-12-06 20:35:24 -08001276 sock_lock_init_class_and_name(sk,
1277 af_family_slock_key_strings[sk->sk_family],
1278 af_family_slock_keys + sk->sk_family,
1279 af_family_key_strings[sk->sk_family],
1280 af_family_keys + sk->sk_family);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001281}
1282
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001283/*
1284 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1285 * even temporarly, because of RCU lookups. sk_node should also be left as is.
Eric Dumazet68835ab2010-11-30 19:04:07 +00001286 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001287 */
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001288static void sock_copy(struct sock *nsk, const struct sock *osk)
1289{
1290#ifdef CONFIG_SECURITY_NETWORK
1291 void *sptr = nsk->sk_security;
1292#endif
Eric Dumazet68835ab2010-11-30 19:04:07 +00001293 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1294
1295 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1296 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1297
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001298#ifdef CONFIG_SECURITY_NETWORK
1299 nsk->sk_security = sptr;
1300 security_sk_clone(osk, nsk);
1301#endif
1302}
1303
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001304void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1305{
1306 unsigned long nulls1, nulls2;
1307
1308 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1309 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1310 if (nulls1 > nulls2)
1311 swap(nulls1, nulls2);
1312
1313 if (nulls1 != 0)
1314 memset((char *)sk, 0, nulls1);
1315 memset((char *)sk + nulls1 + sizeof(void *), 0,
1316 nulls2 - nulls1 - sizeof(void *));
1317 memset((char *)sk + nulls2 + sizeof(void *), 0,
1318 size - nulls2 - sizeof(void *));
1319}
1320EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1321
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001322static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1323 int family)
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001324{
1325 struct sock *sk;
1326 struct kmem_cache *slab;
1327
1328 slab = prot->slab;
Eric Dumazete912b112009-07-08 19:36:05 +00001329 if (slab != NULL) {
1330 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1331 if (!sk)
1332 return sk;
1333 if (priority & __GFP_ZERO) {
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001334 if (prot->clear_sk)
1335 prot->clear_sk(sk, prot->obj_size);
1336 else
1337 sk_prot_clear_nulls(sk, prot->obj_size);
Eric Dumazete912b112009-07-08 19:36:05 +00001338 }
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001339 } else
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001340 sk = kmalloc(prot->obj_size, priority);
1341
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001342 if (sk != NULL) {
Vegard Nossuma98b65a2009-02-26 14:46:57 +01001343 kmemcheck_annotate_bitfield(sk, flags);
1344
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001345 if (security_sk_alloc(sk, family, priority))
1346 goto out_free;
1347
1348 if (!try_module_get(prot->owner))
1349 goto out_free_sec;
Krishna Kumare022f0b2009-10-19 23:46:20 +00001350 sk_tx_queue_clear(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001351 }
1352
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001353 return sk;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001354
1355out_free_sec:
1356 security_sk_free(sk);
1357out_free:
1358 if (slab != NULL)
1359 kmem_cache_free(slab, sk);
1360 else
1361 kfree(sk);
1362 return NULL;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001363}
1364
1365static void sk_prot_free(struct proto *prot, struct sock *sk)
1366{
1367 struct kmem_cache *slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001368 struct module *owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001369
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001370 owner = prot->owner;
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001371 slab = prot->slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001372
1373 security_sk_free(sk);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001374 if (slab != NULL)
1375 kmem_cache_free(slab, sk);
1376 else
1377 kfree(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001378 module_put(owner);
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001379}
1380
Daniel Borkmann86f85152013-12-29 17:27:11 +01001381#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Zefan Li6ffd4642013-04-08 20:03:47 +00001382void sock_update_netprioidx(struct sock *sk)
Neil Horman5bc14212011-11-22 05:10:51 +00001383{
Neil Horman5bc14212011-11-22 05:10:51 +00001384 if (in_interrupt())
1385 return;
Neil Horman2b73bc62012-02-10 05:43:38 +00001386
Zefan Li6ffd4642013-04-08 20:03:47 +00001387 sk->sk_cgrp_prioidx = task_netprioidx(current);
Neil Horman5bc14212011-11-22 05:10:51 +00001388}
1389EXPORT_SYMBOL_GPL(sock_update_netprioidx);
Herbert Xuf8451722010-05-24 00:12:34 -07001390#endif
1391
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392/**
1393 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001394 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001395 * @family: protocol family
1396 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1397 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001399struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001400 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001402 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001404 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001406 sk->sk_family = family;
1407 /*
1408 * See comment in struct sock definition to understand
1409 * why we need sk_prot_creator -acme
1410 */
1411 sk->sk_prot = sk->sk_prot_creator = prot;
1412 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001413 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001414 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001415
Zefan Li211d2f972013-04-08 20:03:35 +00001416 sock_update_classid(sk);
Zefan Li6ffd4642013-04-08 20:03:47 +00001417 sock_update_netprioidx(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 }
Frank Filza79af592005-09-27 15:23:38 -07001419
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001420 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421}
Eric Dumazet2a915252009-05-27 11:30:05 +00001422EXPORT_SYMBOL(sk_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
Eric Dumazet2b85a342009-06-11 02:55:43 -07001424static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425{
1426 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427
1428 if (sk->sk_destruct)
1429 sk->sk_destruct(sk);
1430
Paul E. McKenneya898def2010-02-22 17:04:49 -08001431 filter = rcu_dereference_check(sk->sk_filter,
1432 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001434 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001435 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 }
1437
Eric Dumazet08e29af2011-11-28 12:04:18 +00001438 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
1440 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001441 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1442 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001444 if (sk->sk_peer_cred)
1445 put_cred(sk->sk_peer_cred);
1446 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001447 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001448 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001450
1451void sk_free(struct sock *sk)
1452{
1453 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001454 * We subtract one from sk_wmem_alloc and can know if
Eric Dumazet2b85a342009-06-11 02:55:43 -07001455 * some packets are still in some tx queue.
1456 * If not null, sock_wfree() will call __sk_free(sk) later
1457 */
1458 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1459 __sk_free(sk);
1460}
Eric Dumazet2a915252009-05-27 11:30:05 +00001461EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
Denis V. Lunevedf02082008-02-29 11:18:32 -08001463/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001464 * Last sock_put should drop reference to sk->sk_net. It has already
1465 * been dropped in sk_change_net. Taking reference to stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001466 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001467 * Take reference to a socket to remove it from hash _alive_ and after that
Denis V. Lunevedf02082008-02-29 11:18:32 -08001468 * destroy it in the context of init_net.
1469 */
1470void sk_release_kernel(struct sock *sk)
1471{
1472 if (sk == NULL || sk->sk_socket == NULL)
1473 return;
1474
1475 sock_hold(sk);
1476 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001477 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001478 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001479 sock_put(sk);
1480}
David S. Miller45af1752008-02-29 11:33:19 -08001481EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001482
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001483static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1484{
1485 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1486 sock_update_memcg(newsk);
1487}
1488
Eric Dumazete56c57d2011-11-08 17:07:07 -05001489/**
1490 * sk_clone_lock - clone a socket, and lock its clone
1491 * @sk: the socket to clone
1492 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1493 *
1494 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1495 */
1496struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001497{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001498 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001499 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001500
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001501 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001502 if (newsk != NULL) {
1503 struct sk_filter *filter;
1504
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001505 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001506
1507 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001508 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001509 sk_node_init(&newsk->sk_node);
1510 sock_lock_init(newsk);
1511 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001512 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001513 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001514
1515 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001516 /*
1517 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1518 */
1519 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001520 atomic_set(&newsk->sk_omem_alloc, 0);
1521 skb_queue_head_init(&newsk->sk_receive_queue);
1522 skb_queue_head_init(&newsk->sk_write_queue);
1523
Eric Dumazetb6c67122010-04-08 23:03:29 +00001524 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001525 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001526 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1527 af_callback_keys + newsk->sk_family,
1528 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001529
1530 newsk->sk_dst_cache = NULL;
1531 newsk->sk_wmem_queued = 0;
1532 newsk->sk_forward_alloc = 0;
1533 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001534 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1535
1536 sock_reset_flag(newsk, SOCK_DONE);
1537 skb_queue_head_init(&newsk->sk_error_queue);
1538
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001539 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001540 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001541 /* though it's an empty new sock, the charging may fail
1542 * if sysctl_optmem_max was changed between creation of
1543 * original socket and cloning
1544 */
1545 is_charged = sk_filter_charge(newsk, filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001546
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001547 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001548 /* It is still raw copy of parent, so invalidate
1549 * destructor and make plain sk_free() */
1550 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001551 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001552 sk_free(newsk);
1553 newsk = NULL;
1554 goto out;
1555 }
1556
1557 newsk->sk_err = 0;
1558 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001559 newsk->sk_incoming_cpu = raw_smp_processor_id();
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001560 /*
1561 * Before updating sk_refcnt, we must commit prior changes to memory
1562 * (Documentation/RCU/rculist_nulls.txt for details)
1563 */
1564 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001565 atomic_set(&newsk->sk_refcnt, 2);
1566
1567 /*
1568 * Increment the counter in the same struct proto as the master
1569 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1570 * is the same as sk->sk_prot->socks, as this field was copied
1571 * with memcpy).
1572 *
1573 * This _changes_ the previous behaviour, where
1574 * tcp_create_openreq_child always was incrementing the
1575 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1576 * to be taken into account in all callers. -acme
1577 */
1578 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001579 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001580 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001581
Glauber Costaf3f511e2012-01-05 20:16:39 +00001582 sk_update_clone(sk, newsk);
1583
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001584 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001585 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001586
Eric Dumazet08e29af2011-11-28 12:04:18 +00001587 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001588 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001589 }
1590out:
1591 return newsk;
1592}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001593EXPORT_SYMBOL_GPL(sk_clone_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001594
Andi Kleen99580892007-04-20 17:12:43 -07001595void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1596{
1597 __sk_dst_set(sk, dst);
1598 sk->sk_route_caps = dst->dev->features;
1599 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001600 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001601 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001602 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001603 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001604 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001605 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001606 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001607 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001608 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001609 }
Andi Kleen99580892007-04-20 17:12:43 -07001610 }
1611}
1612EXPORT_SYMBOL_GPL(sk_setup_caps);
1613
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614/*
1615 * Simple resource managers for sockets.
1616 */
1617
1618
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001619/*
1620 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 */
1622void sock_wfree(struct sk_buff *skb)
1623{
1624 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001625 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
Eric Dumazetd99927f2009-09-24 10:49:24 +00001627 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1628 /*
1629 * Keep a reference on sk_wmem_alloc, this will be released
1630 * after sk_write_space() call
1631 */
1632 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001634 len = 1;
1635 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001636 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001637 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1638 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001639 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001640 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001641 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642}
Eric Dumazet2a915252009-05-27 11:30:05 +00001643EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001645void skb_orphan_partial(struct sk_buff *skb)
1646{
1647 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1648 * so we do not completely orphan skb, but transfert all
1649 * accounted bytes but one, to avoid unexpected reorders.
1650 */
1651 if (skb->destructor == sock_wfree
1652#ifdef CONFIG_INET
1653 || skb->destructor == tcp_wfree
1654#endif
1655 ) {
1656 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1657 skb->truesize = 1;
1658 } else {
1659 skb_orphan(skb);
1660 }
1661}
1662EXPORT_SYMBOL(skb_orphan_partial);
1663
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001664/*
1665 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 */
1667void sock_rfree(struct sk_buff *skb)
1668{
1669 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001670 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
Eric Dumazetd361fd52010-07-10 22:45:17 +00001672 atomic_sub(len, &sk->sk_rmem_alloc);
1673 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674}
Eric Dumazet2a915252009-05-27 11:30:05 +00001675EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
Oliver Hartkopp7768eed2015-03-10 19:03:46 +01001677/*
1678 * Buffer destructor for skbs that are not used directly in read or write
1679 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1680 */
Alexander Duyck62bccb82014-09-04 13:31:35 -04001681void sock_efree(struct sk_buff *skb)
1682{
1683 sock_put(skb->sk);
1684}
1685EXPORT_SYMBOL(sock_efree);
1686
Alexander Duyck82eabd92014-09-04 13:32:11 -04001687#ifdef CONFIG_INET
David S. Miller41063e92012-06-19 21:22:05 -07001688void sock_edemux(struct sk_buff *skb)
1689{
Eric Dumazete8123472012-09-02 23:57:18 +00001690 struct sock *sk = skb->sk;
1691
1692 if (sk->sk_state == TCP_TIME_WAIT)
1693 inet_twsk_put(inet_twsk(sk));
1694 else
1695 sock_put(sk);
David S. Miller41063e92012-06-19 21:22:05 -07001696}
1697EXPORT_SYMBOL(sock_edemux);
Alexander Duyck82eabd92014-09-04 13:32:11 -04001698#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Eric W. Biederman976d02012012-05-23 17:16:53 -06001700kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001702 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Eric Dumazetf064af12010-09-22 12:43:39 +00001704 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001705 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001706 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 return uid;
1708}
Eric Dumazet2a915252009-05-27 11:30:05 +00001709EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
1711unsigned long sock_i_ino(struct sock *sk)
1712{
1713 unsigned long ino;
1714
Eric Dumazetf064af12010-09-22 12:43:39 +00001715 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001717 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 return ino;
1719}
Eric Dumazet2a915252009-05-27 11:30:05 +00001720EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
1722/*
1723 * Allocate a skb from the socket's send buffer.
1724 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001725struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001726 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727{
1728 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001729 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 if (skb) {
1731 skb_set_owner_w(skb, sk);
1732 return skb;
1733 }
1734 }
1735 return NULL;
1736}
Eric Dumazet2a915252009-05-27 11:30:05 +00001737EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
1739/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001741 */
Al Virodd0fc662005-10-07 07:46:04 +01001742void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743{
Eric Dumazet95c96172012-04-15 05:58:06 +00001744 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1746 void *mem;
1747 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001748 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 */
1750 atomic_add(size, &sk->sk_omem_alloc);
1751 mem = kmalloc(size, priority);
1752 if (mem)
1753 return mem;
1754 atomic_sub(size, &sk->sk_omem_alloc);
1755 }
1756 return NULL;
1757}
Eric Dumazet2a915252009-05-27 11:30:05 +00001758EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
Daniel Borkmann79e88652014-11-19 17:13:11 +01001760/* Free an option memory block. Note, we actually want the inline
1761 * here as this allows gcc to detect the nullify and fold away the
1762 * condition entirely.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 */
Daniel Borkmann79e88652014-11-19 17:13:11 +01001764static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1765 const bool nullify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766{
David S. Millere53da5f2014-10-14 17:02:37 -04001767 if (WARN_ON_ONCE(!mem))
1768 return;
Daniel Borkmann79e88652014-11-19 17:13:11 +01001769 if (nullify)
1770 kzfree(mem);
1771 else
1772 kfree(mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 atomic_sub(size, &sk->sk_omem_alloc);
1774}
Daniel Borkmann79e88652014-11-19 17:13:11 +01001775
1776void sock_kfree_s(struct sock *sk, void *mem, int size)
1777{
1778 __sock_kfree_s(sk, mem, size, false);
1779}
Eric Dumazet2a915252009-05-27 11:30:05 +00001780EXPORT_SYMBOL(sock_kfree_s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781
Daniel Borkmann79e88652014-11-19 17:13:11 +01001782void sock_kzfree_s(struct sock *sk, void *mem, int size)
1783{
1784 __sock_kfree_s(sk, mem, size, true);
1785}
1786EXPORT_SYMBOL(sock_kzfree_s);
1787
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1789 I think, these locks should be removed for datagram sockets.
1790 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001791static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792{
1793 DEFINE_WAIT(wait);
1794
1795 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1796 for (;;) {
1797 if (!timeo)
1798 break;
1799 if (signal_pending(current))
1800 break;
1801 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001802 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1804 break;
1805 if (sk->sk_shutdown & SEND_SHUTDOWN)
1806 break;
1807 if (sk->sk_err)
1808 break;
1809 timeo = schedule_timeout(timeo);
1810 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001811 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 return timeo;
1813}
1814
1815
1816/*
1817 * Generic send/receive buffer handlers
1818 */
1819
Herbert Xu4cc7f682009-02-04 16:55:54 -08001820struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1821 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07001822 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001824 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 long timeo;
1826 int err;
1827
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001829 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 err = sock_error(sk);
1831 if (err != 0)
1832 goto failure;
1833
1834 err = -EPIPE;
1835 if (sk->sk_shutdown & SEND_SHUTDOWN)
1836 goto failure;
1837
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001838 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1839 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07001840
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001841 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1842 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1843 err = -EAGAIN;
1844 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001846 if (signal_pending(current))
1847 goto interrupted;
1848 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001850 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1851 errcode, sk->sk_allocation);
1852 if (skb)
1853 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 return skb;
1855
1856interrupted:
1857 err = sock_intr_errno(timeo);
1858failure:
1859 *errcode = err;
1860 return NULL;
1861}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001862EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001864struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 int noblock, int *errcode)
1866{
Eric Dumazet28d64272013-08-08 14:38:47 -07001867 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868}
Eric Dumazet2a915252009-05-27 11:30:05 +00001869EXPORT_SYMBOL(sock_alloc_send_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870
Eric Dumazet5640f762012-09-23 23:04:42 +00001871/* On 32bit arches, an skb frag is limited to 2^15 */
1872#define SKB_FRAG_PAGE_ORDER get_order(32768)
1873
Eric Dumazet400dfd32013-10-17 16:27:07 -07001874/**
1875 * skb_page_frag_refill - check that a page_frag contains enough room
1876 * @sz: minimum size of the fragment we want to get
1877 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07001878 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07001879 *
1880 * Note: While this allocator tries to use high order pages, there is
1881 * no guarantee that allocations succeed. Therefore, @sz MUST be
1882 * less or equal than PAGE_SIZE.
1883 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07001884bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00001885{
Eric Dumazet5640f762012-09-23 23:04:42 +00001886 if (pfrag->page) {
1887 if (atomic_read(&pfrag->page->_count) == 1) {
1888 pfrag->offset = 0;
1889 return true;
1890 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001891 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00001892 return true;
1893 put_page(pfrag->page);
1894 }
1895
Eric Dumazetd9b29382014-08-27 20:49:34 -07001896 pfrag->offset = 0;
1897 if (SKB_FRAG_PAGE_ORDER) {
1898 pfrag->page = alloc_pages(gfp | __GFP_COMP |
1899 __GFP_NOWARN | __GFP_NORETRY,
1900 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00001901 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07001902 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00001903 return true;
1904 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07001905 }
1906 pfrag->page = alloc_page(gfp);
1907 if (likely(pfrag->page)) {
1908 pfrag->size = PAGE_SIZE;
1909 return true;
1910 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001911 return false;
1912}
1913EXPORT_SYMBOL(skb_page_frag_refill);
1914
1915bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1916{
1917 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1918 return true;
1919
Eric Dumazet5640f762012-09-23 23:04:42 +00001920 sk_enter_memory_pressure(sk);
1921 sk_stream_moderate_sndbuf(sk);
1922 return false;
1923}
1924EXPORT_SYMBOL(sk_page_frag_refill);
1925
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001927 __releases(&sk->sk_lock.slock)
1928 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929{
1930 DEFINE_WAIT(wait);
1931
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001932 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1934 TASK_UNINTERRUPTIBLE);
1935 spin_unlock_bh(&sk->sk_lock.slock);
1936 schedule();
1937 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001938 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 break;
1940 }
1941 finish_wait(&sk->sk_lock.wq, &wait);
1942}
1943
1944static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001945 __releases(&sk->sk_lock.slock)
1946 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947{
1948 struct sk_buff *skb = sk->sk_backlog.head;
1949
1950 do {
1951 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1952 bh_unlock_sock(sk);
1953
1954 do {
1955 struct sk_buff *next = skb->next;
1956
Eric Dumazete4cbb022012-04-30 16:07:09 +00001957 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001958 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001960 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
1962 /*
1963 * We are in process context here with softirqs
1964 * disabled, use cond_resched_softirq() to preempt.
1965 * This is safe to do because we've taken the backlog
1966 * queue private:
1967 */
1968 cond_resched_softirq();
1969
1970 skb = next;
1971 } while (skb != NULL);
1972
1973 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001974 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001975
1976 /*
1977 * Doing the zeroing here guarantee we can not loop forever
1978 * while a wild producer attempts to flood us.
1979 */
1980 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981}
1982
1983/**
1984 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001985 * @sk: sock to wait on
1986 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 *
1988 * Now socket state including sk->sk_err is changed only under lock,
1989 * hence we may omit checks after joining wait queue.
1990 * We check receive queue before schedule() only as optimization;
1991 * it is very likely that release_sock() added new data.
1992 */
1993int sk_wait_data(struct sock *sk, long *timeo)
1994{
1995 int rc;
1996 DEFINE_WAIT(wait);
1997
Eric Dumazetaa395142010-04-20 13:03:51 +00001998 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2000 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
2001 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00002002 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 return rc;
2004}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005EXPORT_SYMBOL(sk_wait_data);
2006
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002007/**
2008 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2009 * @sk: socket
2010 * @size: memory size to allocate
2011 * @kind: allocation type
2012 *
2013 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2014 * rmem allocation. This function assumes that protocols which have
2015 * memory_pressure use sk_wmem_queued as write buffer accounting.
2016 */
2017int __sk_mem_schedule(struct sock *sk, int size, int kind)
2018{
2019 struct proto *prot = sk->sk_prot;
2020 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00002021 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00002022 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002023
2024 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002025
Glauber Costae1aab162011-12-11 21:47:03 +00002026 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002027
2028 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00002029 if (parent_status == UNDER_LIMIT &&
2030 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00002031 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002032 return 1;
2033 }
2034
Glauber Costae1aab162011-12-11 21:47:03 +00002035 /* Under pressure. (we or our parents) */
2036 if ((parent_status > SOFT_LIMIT) ||
2037 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00002038 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002039
Glauber Costae1aab162011-12-11 21:47:03 +00002040 /* Over hard limit (we or our parents) */
2041 if ((parent_status == OVER_LIMIT) ||
2042 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002043 goto suppress_allocation;
2044
2045 /* guarantee minimum buffer size under pressure */
2046 if (kind == SK_MEM_RECV) {
2047 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2048 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002049
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002050 } else { /* SK_MEM_SEND */
2051 if (sk->sk_type == SOCK_STREAM) {
2052 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2053 return 1;
2054 } else if (atomic_read(&sk->sk_wmem_alloc) <
2055 prot->sysctl_wmem[0])
2056 return 1;
2057 }
2058
Glauber Costa180d8cd2011-12-11 21:47:02 +00002059 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08002060 int alloc;
2061
Glauber Costa180d8cd2011-12-11 21:47:02 +00002062 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002063 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002064 alloc = sk_sockets_allocated_read_positive(sk);
2065 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002066 sk_mem_pages(sk->sk_wmem_queued +
2067 atomic_read(&sk->sk_rmem_alloc) +
2068 sk->sk_forward_alloc))
2069 return 1;
2070 }
2071
2072suppress_allocation:
2073
2074 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2075 sk_stream_moderate_sndbuf(sk);
2076
2077 /* Fail only if socket is _under_ its sndbuf.
2078 * In this case we cannot block, so that we have to fail.
2079 */
2080 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2081 return 1;
2082 }
2083
Satoru Moriya3847ce32011-06-17 12:00:03 +00002084 trace_sock_exceed_buf_limit(sk, prot, allocated);
2085
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002086 /* Alas. Undo changes. */
2087 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002088
Glauber Costa0e90b312012-01-20 04:57:16 +00002089 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002090
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002091 return 0;
2092}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002093EXPORT_SYMBOL(__sk_mem_schedule);
2094
2095/**
2096 * __sk_reclaim - reclaim memory_allocated
2097 * @sk: socket
2098 */
2099void __sk_mem_reclaim(struct sock *sk)
2100{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002101 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00002102 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002103 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2104
Glauber Costa180d8cd2011-12-11 21:47:02 +00002105 if (sk_under_memory_pressure(sk) &&
2106 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2107 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002108}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002109EXPORT_SYMBOL(__sk_mem_reclaim);
2110
2111
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112/*
2113 * Set of default routines for initialising struct proto_ops when
2114 * the protocol does not support a particular function. In certain
2115 * cases where it makes no sense for a protocol to have a "do nothing"
2116 * function, some default processing is provided.
2117 */
2118
2119int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2120{
2121 return -EOPNOTSUPP;
2122}
Eric Dumazet2a915252009-05-27 11:30:05 +00002123EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002125int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 int len, int flags)
2127{
2128 return -EOPNOTSUPP;
2129}
Eric Dumazet2a915252009-05-27 11:30:05 +00002130EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
2132int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2133{
2134 return -EOPNOTSUPP;
2135}
Eric Dumazet2a915252009-05-27 11:30:05 +00002136EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
2138int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2139{
2140 return -EOPNOTSUPP;
2141}
Eric Dumazet2a915252009-05-27 11:30:05 +00002142EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002144int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 int *len, int peer)
2146{
2147 return -EOPNOTSUPP;
2148}
Eric Dumazet2a915252009-05-27 11:30:05 +00002149EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150
Eric Dumazet2a915252009-05-27 11:30:05 +00002151unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152{
2153 return 0;
2154}
Eric Dumazet2a915252009-05-27 11:30:05 +00002155EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
2157int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2158{
2159 return -EOPNOTSUPP;
2160}
Eric Dumazet2a915252009-05-27 11:30:05 +00002161EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162
2163int sock_no_listen(struct socket *sock, int backlog)
2164{
2165 return -EOPNOTSUPP;
2166}
Eric Dumazet2a915252009-05-27 11:30:05 +00002167EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
2169int sock_no_shutdown(struct socket *sock, int how)
2170{
2171 return -EOPNOTSUPP;
2172}
Eric Dumazet2a915252009-05-27 11:30:05 +00002173EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
2175int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002176 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177{
2178 return -EOPNOTSUPP;
2179}
Eric Dumazet2a915252009-05-27 11:30:05 +00002180EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
2182int sock_no_getsockopt(struct socket *sock, int level, int optname,
2183 char __user *optval, int __user *optlen)
2184{
2185 return -EOPNOTSUPP;
2186}
Eric Dumazet2a915252009-05-27 11:30:05 +00002187EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
2189int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2190 size_t len)
2191{
2192 return -EOPNOTSUPP;
2193}
Eric Dumazet2a915252009-05-27 11:30:05 +00002194EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
2196int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2197 size_t len, int flags)
2198{
2199 return -EOPNOTSUPP;
2200}
Eric Dumazet2a915252009-05-27 11:30:05 +00002201EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
2203int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2204{
2205 /* Mirror missing mmap method error code */
2206 return -ENODEV;
2207}
Eric Dumazet2a915252009-05-27 11:30:05 +00002208EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
2210ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2211{
2212 ssize_t res;
2213 struct msghdr msg = {.msg_flags = flags};
2214 struct kvec iov;
2215 char *kaddr = kmap(page);
2216 iov.iov_base = kaddr + offset;
2217 iov.iov_len = size;
2218 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2219 kunmap(page);
2220 return res;
2221}
Eric Dumazet2a915252009-05-27 11:30:05 +00002222EXPORT_SYMBOL(sock_no_sendpage);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
2224/*
2225 * Default Socket Callbacks
2226 */
2227
2228static void sock_def_wakeup(struct sock *sk)
2229{
Eric Dumazet43815482010-04-29 11:01:49 +00002230 struct socket_wq *wq;
2231
2232 rcu_read_lock();
2233 wq = rcu_dereference(sk->sk_wq);
2234 if (wq_has_sleeper(wq))
2235 wake_up_interruptible_all(&wq->wait);
2236 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237}
2238
2239static void sock_def_error_report(struct sock *sk)
2240{
Eric Dumazet43815482010-04-29 11:01:49 +00002241 struct socket_wq *wq;
2242
2243 rcu_read_lock();
2244 wq = rcu_dereference(sk->sk_wq);
2245 if (wq_has_sleeper(wq))
2246 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002247 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002248 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249}
2250
David S. Miller676d2362014-04-11 16:15:36 -04002251static void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252{
Eric Dumazet43815482010-04-29 11:01:49 +00002253 struct socket_wq *wq;
2254
2255 rcu_read_lock();
2256 wq = rcu_dereference(sk->sk_wq);
2257 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002258 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002259 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002260 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002261 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262}
2263
2264static void sock_def_write_space(struct sock *sk)
2265{
Eric Dumazet43815482010-04-29 11:01:49 +00002266 struct socket_wq *wq;
2267
2268 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269
2270 /* Do not wake up a writer until he can make "significant"
2271 * progress. --DaveM
2272 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002273 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002274 wq = rcu_dereference(sk->sk_wq);
2275 if (wq_has_sleeper(wq))
2276 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002277 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
2279 /* Should agree with poll, otherwise some programs break */
2280 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002281 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 }
2283
Eric Dumazet43815482010-04-29 11:01:49 +00002284 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285}
2286
2287static void sock_def_destruct(struct sock *sk)
2288{
Jesper Juhla51482b2005-11-08 09:41:34 -08002289 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290}
2291
2292void sk_send_sigurg(struct sock *sk)
2293{
2294 if (sk->sk_socket && sk->sk_socket->file)
2295 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002296 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297}
Eric Dumazet2a915252009-05-27 11:30:05 +00002298EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
2300void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2301 unsigned long expires)
2302{
2303 if (!mod_timer(timer, expires))
2304 sock_hold(sk);
2305}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306EXPORT_SYMBOL(sk_reset_timer);
2307
2308void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2309{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002310 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 __sock_put(sk);
2312}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313EXPORT_SYMBOL(sk_stop_timer);
2314
2315void sock_init_data(struct socket *sock, struct sock *sk)
2316{
2317 skb_queue_head_init(&sk->sk_receive_queue);
2318 skb_queue_head_init(&sk->sk_write_queue);
2319 skb_queue_head_init(&sk->sk_error_queue);
2320
2321 sk->sk_send_head = NULL;
2322
2323 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002324
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 sk->sk_allocation = GFP_KERNEL;
2326 sk->sk_rcvbuf = sysctl_rmem_default;
2327 sk->sk_sndbuf = sysctl_wmem_default;
2328 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002329 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
2331 sock_set_flag(sk, SOCK_ZAPPED);
2332
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002333 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002335 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 sock->sk = sk;
2337 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002338 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339
Eric Dumazetb6c67122010-04-08 23:03:29 +00002340 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002342 lockdep_set_class_and_name(&sk->sk_callback_lock,
2343 af_callback_keys + sk->sk_family,
2344 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
2346 sk->sk_state_change = sock_def_wakeup;
2347 sk->sk_data_ready = sock_def_readable;
2348 sk->sk_write_space = sock_def_write_space;
2349 sk->sk_error_report = sock_def_error_report;
2350 sk->sk_destruct = sock_def_destruct;
2351
Eric Dumazet5640f762012-09-23 23:04:42 +00002352 sk->sk_frag.page = NULL;
2353 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002354 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002356 sk->sk_peer_pid = NULL;
2357 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 sk->sk_write_pending = 0;
2359 sk->sk_rcvlowat = 1;
2360 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2361 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2362
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002363 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364
Cong Wange0d10952013-08-01 11:10:25 +08002365#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03002366 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002367 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002368#endif
2369
Eric Dumazet62748f32013-09-24 08:20:52 -07002370 sk->sk_max_pacing_rate = ~0U;
Eric Dumazet7eec4172013-10-08 15:16:00 -07002371 sk->sk_pacing_rate = ~0U;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002372 /*
2373 * Before updating sk_refcnt, we must commit prior changes to memory
2374 * (Documentation/RCU/rculist_nulls.txt for details)
2375 */
2376 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002378 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379}
Eric Dumazet2a915252009-05-27 11:30:05 +00002380EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002382void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383{
2384 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002385 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002386 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002388 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002389 spin_unlock(&sk->sk_lock.slock);
2390 /*
2391 * The sk_lock has mutex_lock() semantics here:
2392 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002393 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002394 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002396EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002398void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002400 /*
2401 * The sk_lock has mutex_unlock() semantics:
2402 */
2403 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2404
2405 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 if (sk->sk_backlog.tail)
2407 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002408
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002409 /* Warning : release_cb() might need to release sk ownership,
2410 * ie call sock_release_ownership(sk) before us.
2411 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002412 if (sk->sk_prot->release_cb)
2413 sk->sk_prot->release_cb(sk);
2414
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002415 sock_release_ownership(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002416 if (waitqueue_active(&sk->sk_lock.wq))
2417 wake_up(&sk->sk_lock.wq);
2418 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419}
2420EXPORT_SYMBOL(release_sock);
2421
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002422/**
2423 * lock_sock_fast - fast version of lock_sock
2424 * @sk: socket
2425 *
2426 * This version should be used for very small section, where process wont block
2427 * return false if fast path is taken
2428 * sk_lock.slock locked, owned = 0, BH disabled
2429 * return true if slow path is taken
2430 * sk_lock.slock unlocked, owned = 1, BH enabled
2431 */
2432bool lock_sock_fast(struct sock *sk)
2433{
2434 might_sleep();
2435 spin_lock_bh(&sk->sk_lock.slock);
2436
2437 if (!sk->sk_lock.owned)
2438 /*
2439 * Note : We must disable BH
2440 */
2441 return false;
2442
2443 __lock_sock(sk);
2444 sk->sk_lock.owned = 1;
2445 spin_unlock(&sk->sk_lock.slock);
2446 /*
2447 * The sk_lock has mutex_lock() semantics here:
2448 */
2449 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2450 local_bh_enable();
2451 return true;
2452}
2453EXPORT_SYMBOL(lock_sock_fast);
2454
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002456{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002457 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002459 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002460 tv = ktime_to_timeval(sk->sk_stamp);
2461 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002463 if (tv.tv_sec == 0) {
2464 sk->sk_stamp = ktime_get_real();
2465 tv = ktime_to_timeval(sk->sk_stamp);
2466 }
2467 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002468}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469EXPORT_SYMBOL(sock_get_timestamp);
2470
Eric Dumazetae40eb12007-03-18 17:33:16 -07002471int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2472{
2473 struct timespec ts;
2474 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002475 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002476 ts = ktime_to_timespec(sk->sk_stamp);
2477 if (ts.tv_sec == -1)
2478 return -ENOENT;
2479 if (ts.tv_sec == 0) {
2480 sk->sk_stamp = ktime_get_real();
2481 ts = ktime_to_timespec(sk->sk_stamp);
2482 }
2483 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2484}
2485EXPORT_SYMBOL(sock_get_timestampns);
2486
Patrick Ohly20d49472009-02-12 05:03:38 +00002487void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002488{
Patrick Ohly20d49472009-02-12 05:03:38 +00002489 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002490 unsigned long previous_flags = sk->sk_flags;
2491
Patrick Ohly20d49472009-02-12 05:03:38 +00002492 sock_set_flag(sk, flag);
2493 /*
2494 * we just set one of the two flags which require net
2495 * time stamping, but time stamping might have been on
2496 * already because of the other one
2497 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002498 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002499 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500 }
2501}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Richard Cochrancb820f82013-07-19 19:40:09 +02002503int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2504 int level, int type)
2505{
2506 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002507 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02002508 int copied, err;
2509
2510 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002511 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02002512 if (skb == NULL)
2513 goto out;
2514
2515 copied = skb->len;
2516 if (copied > len) {
2517 msg->msg_flags |= MSG_TRUNC;
2518 copied = len;
2519 }
David S. Miller51f3d022014-11-05 16:46:40 -05002520 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02002521 if (err)
2522 goto out_free_skb;
2523
2524 sock_recv_timestamp(msg, sk, skb);
2525
2526 serr = SKB_EXT_ERR(skb);
2527 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2528
2529 msg->msg_flags |= MSG_ERRQUEUE;
2530 err = copied;
2531
Richard Cochrancb820f82013-07-19 19:40:09 +02002532out_free_skb:
2533 kfree_skb(skb);
2534out:
2535 return err;
2536}
2537EXPORT_SYMBOL(sock_recv_errqueue);
2538
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539/*
2540 * Get a socket option on an socket.
2541 *
2542 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2543 * asynchronous errors should be reported by getsockopt. We assume
2544 * this means if you specify SO_ERROR (otherwise whats the point of it).
2545 */
2546int sock_common_getsockopt(struct socket *sock, int level, int optname,
2547 char __user *optval, int __user *optlen)
2548{
2549 struct sock *sk = sock->sk;
2550
2551 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2552}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553EXPORT_SYMBOL(sock_common_getsockopt);
2554
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002555#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002556int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2557 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002558{
2559 struct sock *sk = sock->sk;
2560
Johannes Berg1e51f952007-03-06 13:44:06 -08002561 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002562 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2563 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002564 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2565}
2566EXPORT_SYMBOL(compat_sock_common_getsockopt);
2567#endif
2568
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2570 struct msghdr *msg, size_t size, int flags)
2571{
2572 struct sock *sk = sock->sk;
2573 int addr_len = 0;
2574 int err;
2575
2576 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2577 flags & ~MSG_DONTWAIT, &addr_len);
2578 if (err >= 0)
2579 msg->msg_namelen = addr_len;
2580 return err;
2581}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582EXPORT_SYMBOL(sock_common_recvmsg);
2583
2584/*
2585 * Set socket options on an inet socket.
2586 */
2587int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002588 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589{
2590 struct sock *sk = sock->sk;
2591
2592 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2593}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594EXPORT_SYMBOL(sock_common_setsockopt);
2595
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002596#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002597int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002598 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002599{
2600 struct sock *sk = sock->sk;
2601
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002602 if (sk->sk_prot->compat_setsockopt != NULL)
2603 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2604 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002605 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2606}
2607EXPORT_SYMBOL(compat_sock_common_setsockopt);
2608#endif
2609
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610void sk_common_release(struct sock *sk)
2611{
2612 if (sk->sk_prot->destroy)
2613 sk->sk_prot->destroy(sk);
2614
2615 /*
2616 * Observation: when sock_common_release is called, processes have
2617 * no access to socket. But net still has.
2618 * Step one, detach it from networking:
2619 *
2620 * A. Remove from hash tables.
2621 */
2622
2623 sk->sk_prot->unhash(sk);
2624
2625 /*
2626 * In this point socket cannot receive new packets, but it is possible
2627 * that some packets are in flight because some CPU runs receiver and
2628 * did hash table lookup before we unhashed socket. They will achieve
2629 * receive queue and will be purged by socket destructor.
2630 *
2631 * Also we still have packets pending on receive queue and probably,
2632 * our own packets waiting in device queues. sock_destroy will drain
2633 * receive queue, but transmitted packets will delay socket destruction
2634 * until the last reference will be released.
2635 */
2636
2637 sock_orphan(sk);
2638
2639 xfrm_sk_free_policy(sk);
2640
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002641 sk_refcnt_debug_release(sk);
Eric Dumazet5640f762012-09-23 23:04:42 +00002642
2643 if (sk->sk_frag.page) {
2644 put_page(sk->sk_frag.page);
2645 sk->sk_frag.page = NULL;
2646 }
2647
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 sock_put(sk);
2649}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650EXPORT_SYMBOL(sk_common_release);
2651
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002652#ifdef CONFIG_PROC_FS
2653#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002654struct prot_inuse {
2655 int val[PROTO_INUSE_NR];
2656};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002657
2658static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002659
2660#ifdef CONFIG_NET_NS
2661void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2662{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002663 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002664}
2665EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2666
2667int sock_prot_inuse_get(struct net *net, struct proto *prot)
2668{
2669 int cpu, idx = prot->inuse_idx;
2670 int res = 0;
2671
2672 for_each_possible_cpu(cpu)
2673 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2674
2675 return res >= 0 ? res : 0;
2676}
2677EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2678
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002679static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002680{
2681 net->core.inuse = alloc_percpu(struct prot_inuse);
2682 return net->core.inuse ? 0 : -ENOMEM;
2683}
2684
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002685static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002686{
2687 free_percpu(net->core.inuse);
2688}
2689
2690static struct pernet_operations net_inuse_ops = {
2691 .init = sock_inuse_init_net,
2692 .exit = sock_inuse_exit_net,
2693};
2694
2695static __init int net_inuse_init(void)
2696{
2697 if (register_pernet_subsys(&net_inuse_ops))
2698 panic("Cannot initialize net inuse counters");
2699
2700 return 0;
2701}
2702
2703core_initcall(net_inuse_init);
2704#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002705static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2706
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002707void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002708{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002709 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002710}
2711EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2712
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002713int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002714{
2715 int cpu, idx = prot->inuse_idx;
2716 int res = 0;
2717
2718 for_each_possible_cpu(cpu)
2719 res += per_cpu(prot_inuse, cpu).val[idx];
2720
2721 return res >= 0 ? res : 0;
2722}
2723EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002724#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002725
2726static void assign_proto_idx(struct proto *prot)
2727{
2728 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2729
2730 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
Joe Perchese005d192012-05-16 19:58:40 +00002731 pr_err("PROTO_INUSE_NR exhausted\n");
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002732 return;
2733 }
2734
2735 set_bit(prot->inuse_idx, proto_inuse_idx);
2736}
2737
2738static void release_proto_idx(struct proto *prot)
2739{
2740 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2741 clear_bit(prot->inuse_idx, proto_inuse_idx);
2742}
2743#else
2744static inline void assign_proto_idx(struct proto *prot)
2745{
2746}
2747
2748static inline void release_proto_idx(struct proto *prot)
2749{
2750}
2751#endif
2752
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753int proto_register(struct proto *prot, int alloc_slab)
2754{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 if (alloc_slab) {
2756 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002757 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2758 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759
2760 if (prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002761 pr_crit("%s: Can't create sock SLAB cache!\n",
2762 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002763 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002765
2766 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002767 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002768 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002769 goto out_free_sock_slab;
2770
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002771 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002772 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002773 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002774
2775 if (prot->rsk_prot->slab == NULL) {
Joe Perchese005d192012-05-16 19:58:40 +00002776 pr_crit("%s: Can't create request sock SLAB cache!\n",
2777 prot->name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002778 goto out_free_request_sock_slab_name;
2779 }
2780 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002781
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002782 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002783 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002784
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002785 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002786 goto out_free_request_sock_slab;
2787
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002788 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002789 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002790 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002791 0,
2792 SLAB_HWCACHE_ALIGN |
2793 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002794 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002795 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002796 goto out_free_timewait_sock_slab_name;
2797 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 }
2799
Glauber Costa36b77a52011-12-16 00:51:59 +00002800 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002802 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002803 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002804 return 0;
2805
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002806out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002807 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002808out_free_request_sock_slab:
2809 if (prot->rsk_prot && prot->rsk_prot->slab) {
2810 kmem_cache_destroy(prot->rsk_prot->slab);
2811 prot->rsk_prot->slab = NULL;
2812 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002813out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002814 if (prot->rsk_prot)
2815 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002816out_free_sock_slab:
2817 kmem_cache_destroy(prot->slab);
2818 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002819out:
2820 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822EXPORT_SYMBOL(proto_register);
2823
2824void proto_unregister(struct proto *prot)
2825{
Glauber Costa36b77a52011-12-16 00:51:59 +00002826 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002827 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002828 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002829 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
2831 if (prot->slab != NULL) {
2832 kmem_cache_destroy(prot->slab);
2833 prot->slab = NULL;
2834 }
2835
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002836 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002837 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002838 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002839 prot->rsk_prot->slab = NULL;
2840 }
2841
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002842 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002843 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002844 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002845 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002846 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848EXPORT_SYMBOL(proto_unregister);
2849
2850#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002852 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853{
Glauber Costa36b77a52011-12-16 00:51:59 +00002854 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002855 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856}
2857
2858static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2859{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002860 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861}
2862
2863static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002864 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865{
Glauber Costa36b77a52011-12-16 00:51:59 +00002866 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867}
2868
2869static char proto_method_implemented(const void *method)
2870{
2871 return method == NULL ? 'n' : 'y';
2872}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002873static long sock_prot_memory_allocated(struct proto *proto)
2874{
Jeffrin Josecb75a362012-04-25 19:17:29 +05302875 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002876}
2877
2878static char *sock_prot_memory_pressure(struct proto *proto)
2879{
2880 return proto->memory_pressure != NULL ?
2881 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2882}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883
2884static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2885{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002886
Eric Dumazet8d987e52010-11-09 23:24:26 +00002887 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2889 proto->name,
2890 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002891 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00002892 sock_prot_memory_allocated(proto),
2893 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 proto->max_header,
2895 proto->slab == NULL ? "no" : "yes",
2896 module_name(proto->owner),
2897 proto_method_implemented(proto->close),
2898 proto_method_implemented(proto->connect),
2899 proto_method_implemented(proto->disconnect),
2900 proto_method_implemented(proto->accept),
2901 proto_method_implemented(proto->ioctl),
2902 proto_method_implemented(proto->init),
2903 proto_method_implemented(proto->destroy),
2904 proto_method_implemented(proto->shutdown),
2905 proto_method_implemented(proto->setsockopt),
2906 proto_method_implemented(proto->getsockopt),
2907 proto_method_implemented(proto->sendmsg),
2908 proto_method_implemented(proto->recvmsg),
2909 proto_method_implemented(proto->sendpage),
2910 proto_method_implemented(proto->bind),
2911 proto_method_implemented(proto->backlog_rcv),
2912 proto_method_implemented(proto->hash),
2913 proto_method_implemented(proto->unhash),
2914 proto_method_implemented(proto->get_port),
2915 proto_method_implemented(proto->enter_memory_pressure));
2916}
2917
2918static int proto_seq_show(struct seq_file *seq, void *v)
2919{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002920 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2922 "protocol",
2923 "size",
2924 "sockets",
2925 "memory",
2926 "press",
2927 "maxhdr",
2928 "slab",
2929 "module",
2930 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2931 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002932 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 return 0;
2934}
2935
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002936static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937 .start = proto_seq_start,
2938 .next = proto_seq_next,
2939 .stop = proto_seq_stop,
2940 .show = proto_seq_show,
2941};
2942
2943static int proto_seq_open(struct inode *inode, struct file *file)
2944{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002945 return seq_open_net(inode, file, &proto_seq_ops,
2946 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947}
2948
Arjan van de Ven9a321442007-02-12 00:55:35 -08002949static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 .owner = THIS_MODULE,
2951 .open = proto_seq_open,
2952 .read = seq_read,
2953 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002954 .release = seq_release_net,
2955};
2956
2957static __net_init int proto_init_net(struct net *net)
2958{
Gao fengd4beaa62013-02-18 01:34:54 +00002959 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
Eric Dumazet14e943d2008-11-19 15:14:01 -08002960 return -ENOMEM;
2961
2962 return 0;
2963}
2964
2965static __net_exit void proto_exit_net(struct net *net)
2966{
Gao fengece31ff2013-02-18 01:34:56 +00002967 remove_proc_entry("protocols", net->proc_net);
Eric Dumazet14e943d2008-11-19 15:14:01 -08002968}
2969
2970
2971static __net_initdata struct pernet_operations proto_net_ops = {
2972 .init = proto_init_net,
2973 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974};
2975
2976static int __init proto_init(void)
2977{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002978 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979}
2980
2981subsys_initcall(proto_init);
2982
2983#endif /* PROC_FS */