/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and whether the current process has it in the
 * user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and whether the current process has it in all
 * user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when the
 * socket was created and whether the current process has it over the network
 * namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
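
/*
 * Illustrative sketch (hypothetical, not part of this file): a protocol's
 * option handler could gate a privileged setting with the helpers above,
 * allowing it only when the socket's opener had CAP_NET_ADMIN at creation
 * time and the current caller still has it over the socket's network
 * namespace.  The handler name below is made up for the example.
 *
 *	static int example_set_priv_option(struct sock *sk, int val)
 *	{
 *		if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		sk->sk_priority = val;
 *		return 0;
 *	}
 */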


#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

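/*
 * Worked example (approximate; the exact numbers depend on the build's
 * struct sk_buff and skb_shared_info layout): on many 64-bit kernels of
 * this era SKB_TRUESIZE(256) evaluates to about 832 bytes, so the two
 * maxima above come to roughly 832 * 256 = 212992 bytes (~208 KiB), the
 * familiar default seen in net.core.rmem_default and wmem_default.
 */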
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);
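
/*
 * Usage sketch (hypothetical caller, not from this file): a transport
 * socket that must keep receiving while the machine is under memory
 * pressure - e.g. one carrying swap traffic - would be flagged right
 * after creation and unflagged before teardown.  The create call below
 * assumes the sock_create_kern() signature of this kernel series.
 *
 *	struct socket *xprt;
 *	int err = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &xprt);
 *	if (!err)
 *		sk_set_memalloc(xprt->sk);
 *	...
 *	sk_clear_memalloc(xprt->sk);
 *	sock_release(xprt);
 */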

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
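
/*
 * Worked example for the conversion above (assuming HZ == 1000): passing
 * { .tv_sec = 1, .tv_usec = 500000 } to SO_RCVTIMEO yields
 * 1 * 1000 + (500000 + 999) / 1000 = 1500 jiffies, i.e. 1.5 seconds;
 * { 0, 0 } selects MAX_SCHEDULE_TIMEOUT ("block indefinitely"); and a
 * negative tv_sec is clamped to a zero timeout with the rate-limited
 * warning above.
 */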

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we dont leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
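
/*
 * Caller sketch (hypothetical, simplified from how datagram protocols use
 * this helper): the per-packet receive routine hands the skb to the owning
 * socket and maps the failure modes to a drop.
 *
 *	static int example_proto_queue(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int rc = sock_queue_rcv_skb(sk, skb);
 *
 *		if (rc < 0) {
 *			// -ENOMEM: receive queue full, -ENOBUFS: rmem charge failed
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */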

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

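/*
 * Userspace view of the two helpers above (sketch, not kernel code;
 * requires CAP_NET_RAW in the socket's network namespace):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 *	char name[IFNAMSIZ];
 *	socklen_t len = sizeof(name);	// must be >= IFNAMSIZ
 *	getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, &len);
 *	// name == "eth0", len == strlen("eth0") + 1
 */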
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP) {
				if (sk->sk_state != TCP_ESTABLISHED) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	/* We implement the SO_SNDLOWAT etc to
	   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
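
/*
 * User-visible example (userspace sketch, not kernel code): because
 * SO_SNDBUF/SO_RCVBUF are doubled on the way in to cover sk_buff overhead,
 * reading the option back returns twice the requested value, subject to
 * the sysctl limits and minimums handled above.
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);	// out == 131072
 */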


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

Daniel Borkmann86f85152013-12-29 17:27:11 +01001359#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Zefan Li6ffd4642013-04-08 20:03:47 +00001360void sock_update_netprioidx(struct sock *sk)
Neil Horman5bc14212011-11-22 05:10:51 +00001361{
Neil Horman5bc14212011-11-22 05:10:51 +00001362 if (in_interrupt())
1363 return;
Neil Horman2b73bc62012-02-10 05:43:38 +00001364
Zefan Li6ffd4642013-04-08 20:03:47 +00001365 sk->sk_cgrp_prioidx = task_netprioidx(current);
Neil Horman5bc14212011-11-22 05:10:51 +00001366}
1367EXPORT_SYMBOL_GPL(sock_update_netprioidx);
Herbert Xuf8451722010-05-24 00:12:34 -07001368#endif
1369
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370/**
1371 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001372 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001373 * @family: protocol family
1374 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1375 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001377struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001378 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379{
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001380 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001382 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001384 sk->sk_family = family;
1385 /*
1386 * See comment in struct sock definition to understand
1387 * why we need sk_prot_creator -acme
1388 */
1389 sk->sk_prot = sk->sk_prot_creator = prot;
1390 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001391 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001392 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001393
Zefan Li211d2f972013-04-08 20:03:35 +00001394 sock_update_classid(sk);
Zefan Li6ffd4642013-04-08 20:03:47 +00001395 sock_update_netprioidx(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 }
Frank Filza79af592005-09-27 15:23:38 -07001397
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001398 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399}
Eric Dumazet2a915252009-05-27 11:30:05 +00001400EXPORT_SYMBOL(sk_alloc);
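/*
 * A minimal usage sketch, assuming a protocol create routine that has the
 * target @net, its struct proto and the new struct socket at hand (names such
 * as answer_prot are illustrative only):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	... protocol specific initialisation of sk ...
 */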
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401
Eric Dumazet2b85a342009-06-11 02:55:43 -07001402static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403{
1404 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
1406 if (sk->sk_destruct)
1407 sk->sk_destruct(sk);
1408
Paul E. McKenneya898def2010-02-22 17:04:49 -08001409 filter = rcu_dereference_check(sk->sk_filter,
1410 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001412 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001413 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 }
1415
Eric Dumazet08e29af2011-11-28 12:04:18 +00001416 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
1418 if (atomic_read(&sk->sk_omem_alloc))
Joe Perchese005d192012-05-16 19:58:40 +00001419 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1420 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001422 if (sk->sk_peer_cred)
1423 put_cred(sk->sk_peer_cred);
1424 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001425 put_net(sock_net(sk));
Pavel Emelyanovc308c1b22007-11-01 00:33:50 -07001426 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001428
1429void sk_free(struct sock *sk)
1430{
1431 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001432 * We subtract one from sk_wmem_alloc and can tell whether
Eric Dumazet2b85a342009-06-11 02:55:43 -07001433 * some packets are still in some tx queue.
 1434 * If it is not zero, sock_wfree() will call __sk_free(sk) later
1435 */
1436 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1437 __sk_free(sk);
1438}
Eric Dumazet2a915252009-05-27 11:30:05 +00001439EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440
Denis V. Lunevedf02082008-02-29 11:18:32 -08001441/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001442 * The last sock_put should drop the reference to sk->sk_net. It has already
 1443 * been dropped in sk_change_net. Taking a reference to the stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001444 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001445 * Take a reference to the socket to remove it from the hash tables while still _alive_, and after that
Denis V. Lunevedf02082008-02-29 11:18:32 -08001446 * destroy it in the context of init_net.
1447 */
1448void sk_release_kernel(struct sock *sk)
1449{
1450 if (sk == NULL || sk->sk_socket == NULL)
1451 return;
1452
1453 sock_hold(sk);
1454 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001455 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001456 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001457 sock_put(sk);
1458}
David S. Miller45af1752008-02-29 11:33:19 -08001459EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001460
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001461static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1462{
1463 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1464 sock_update_memcg(newsk);
1465}
1466
Eric Dumazete56c57d2011-11-08 17:07:07 -05001467/**
1468 * sk_clone_lock - clone a socket, and lock its clone
1469 * @sk: the socket to clone
1470 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1471 *
1472 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1473 */
1474struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001475{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001476 struct sock *newsk;
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001477 bool is_charged = true;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001478
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001479 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001480 if (newsk != NULL) {
1481 struct sk_filter *filter;
1482
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001483 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001484
1485 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001486 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001487 sk_node_init(&newsk->sk_node);
1488 sock_lock_init(newsk);
1489 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001490 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001491 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001492
1493 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001494 /*
1495 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1496 */
1497 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001498 atomic_set(&newsk->sk_omem_alloc, 0);
1499 skb_queue_head_init(&newsk->sk_receive_queue);
1500 skb_queue_head_init(&newsk->sk_write_queue);
1501
Eric Dumazetb6c67122010-04-08 23:03:29 +00001502 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001503 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001504 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1505 af_callback_keys + newsk->sk_family,
1506 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001507
1508 newsk->sk_dst_cache = NULL;
1509 newsk->sk_wmem_queued = 0;
1510 newsk->sk_forward_alloc = 0;
1511 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001512 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1513
1514 sock_reset_flag(newsk, SOCK_DONE);
1515 skb_queue_head_init(&newsk->sk_error_queue);
1516
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001517 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001518 if (filter != NULL)
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001519 /* though it's an empty new sock, the charging may fail
1520 * if sysctl_optmem_max was changed between creation of
 1521 * the original socket and cloning
1522 */
1523 is_charged = sk_filter_charge(newsk, filter);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001524
Alexei Starovoitov278571b2014-07-30 20:34:12 -07001525 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001526 /* It is still a raw copy of the parent, so invalidate
 1527 * the destructor and do a plain sk_free() */
1528 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001529 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001530 sk_free(newsk);
1531 newsk = NULL;
1532 goto out;
1533 }
1534
1535 newsk->sk_err = 0;
1536 newsk->sk_priority = 0;
Eric Dumazet2c8c56e2014-11-11 05:54:28 -08001537 newsk->sk_incoming_cpu = raw_smp_processor_id();
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001538 /*
1539 * Before updating sk_refcnt, we must commit prior changes to memory
1540 * (Documentation/RCU/rculist_nulls.txt for details)
1541 */
1542 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001543 atomic_set(&newsk->sk_refcnt, 2);
1544
1545 /*
1546 * Increment the counter in the same struct proto as the master
1547 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1548 * is the same as sk->sk_prot->socks, as this field was copied
1549 * with memcpy).
1550 *
1551 * This _changes_ the previous behaviour, where
1552 * tcp_create_openreq_child always was incrementing the
 1553 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1554 * to be taken into account in all callers. -acme
1555 */
1556 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001557 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001558 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001559
Glauber Costaf3f511e2012-01-05 20:16:39 +00001560 sk_update_clone(sk, newsk);
1561
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001562 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001563 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001564
Eric Dumazet08e29af2011-11-28 12:04:18 +00001565 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001566 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001567 }
1568out:
1569 return newsk;
1570}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001571EXPORT_SYMBOL_GPL(sk_clone_lock);
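/*
 * A minimal usage sketch, assuming an accept-style path cloning a parent
 * socket (purely illustrative; real callers add protocol specific setup):
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		... initialise protocol private state of newsk ...
 *		bh_unlock_sock(newsk);
 *	}
 */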
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001572
Andi Kleen99580892007-04-20 17:12:43 -07001573void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1574{
1575 __sk_dst_set(sk, dst);
1576 sk->sk_route_caps = dst->dev->features;
1577 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001578 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001579 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001580 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001581 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001582 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001583 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001584 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001585 sk->sk_gso_max_size = dst->dev->gso_max_size;
Ben Hutchings14853482012-07-30 16:11:42 +00001586 sk->sk_gso_max_segs = dst->dev->gso_max_segs;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001587 }
Andi Kleen99580892007-04-20 17:12:43 -07001588 }
1589}
1590EXPORT_SYMBOL_GPL(sk_setup_caps);
1591
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592/*
1593 * Simple resource managers for sockets.
1594 */
1595
1596
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001597/*
1598 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 */
1600void sock_wfree(struct sk_buff *skb)
1601{
1602 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001603 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Eric Dumazetd99927f2009-09-24 10:49:24 +00001605 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1606 /*
 1607 * Keep a reference on sk_wmem_alloc; it will be released
 1608 * after the sk_write_space() call
1609 */
1610 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001612 len = 1;
1613 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001614 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001615 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1616 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001617 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001618 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001619 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620}
Eric Dumazet2a915252009-05-27 11:30:05 +00001621EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Eric Dumazetf2f872f2013-07-30 17:55:08 -07001623void skb_orphan_partial(struct sk_buff *skb)
1624{
1625 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
 1626 * so we do not completely orphan skb, but transfer all
1627 * accounted bytes but one, to avoid unexpected reorders.
1628 */
1629 if (skb->destructor == sock_wfree
1630#ifdef CONFIG_INET
1631 || skb->destructor == tcp_wfree
1632#endif
1633 ) {
1634 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1635 skb->truesize = 1;
1636 } else {
1637 skb_orphan(skb);
1638 }
1639}
1640EXPORT_SYMBOL(skb_orphan_partial);
1641
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001642/*
1643 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 */
1645void sock_rfree(struct sk_buff *skb)
1646{
1647 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001648 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
Eric Dumazetd361fd52010-07-10 22:45:17 +00001650 atomic_sub(len, &sk->sk_rmem_alloc);
1651 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652}
Eric Dumazet2a915252009-05-27 11:30:05 +00001653EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
Alexander Duyck62bccb82014-09-04 13:31:35 -04001655void sock_efree(struct sk_buff *skb)
1656{
1657 sock_put(skb->sk);
1658}
1659EXPORT_SYMBOL(sock_efree);
1660
Alexander Duyck82eabd92014-09-04 13:32:11 -04001661#ifdef CONFIG_INET
David S. Miller41063e92012-06-19 21:22:05 -07001662void sock_edemux(struct sk_buff *skb)
1663{
Eric Dumazete8123472012-09-02 23:57:18 +00001664 struct sock *sk = skb->sk;
1665
1666 if (sk->sk_state == TCP_TIME_WAIT)
1667 inet_twsk_put(inet_twsk(sk));
1668 else
1669 sock_put(sk);
David S. Miller41063e92012-06-19 21:22:05 -07001670}
1671EXPORT_SYMBOL(sock_edemux);
Alexander Duyck82eabd92014-09-04 13:32:11 -04001672#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Eric W. Biederman976d02012012-05-23 17:16:53 -06001674kuid_t sock_i_uid(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675{
Eric W. Biederman976d02012012-05-23 17:16:53 -06001676 kuid_t uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
Eric Dumazetf064af12010-09-22 12:43:39 +00001678 read_lock_bh(&sk->sk_callback_lock);
Eric W. Biederman976d02012012-05-23 17:16:53 -06001679 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
Eric Dumazetf064af12010-09-22 12:43:39 +00001680 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 return uid;
1682}
Eric Dumazet2a915252009-05-27 11:30:05 +00001683EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684
1685unsigned long sock_i_ino(struct sock *sk)
1686{
1687 unsigned long ino;
1688
Eric Dumazetf064af12010-09-22 12:43:39 +00001689 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001691 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 return ino;
1693}
Eric Dumazet2a915252009-05-27 11:30:05 +00001694EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696/*
 1697 * Allocate an skb from the socket's send buffer.
1698 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001699struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001700 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701{
1702 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001703 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 if (skb) {
1705 skb_set_owner_w(skb, sk);
1706 return skb;
1707 }
1708 }
1709 return NULL;
1710}
Eric Dumazet2a915252009-05-27 11:30:05 +00001711EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
1713/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001715 */
Al Virodd0fc662005-10-07 07:46:04 +01001716void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717{
Eric Dumazet95c96172012-04-15 05:58:06 +00001718 if ((unsigned int)size <= sysctl_optmem_max &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1720 void *mem;
1721 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001722 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 */
1724 atomic_add(size, &sk->sk_omem_alloc);
1725 mem = kmalloc(size, priority);
1726 if (mem)
1727 return mem;
1728 atomic_sub(size, &sk->sk_omem_alloc);
1729 }
1730 return NULL;
1731}
Eric Dumazet2a915252009-05-27 11:30:05 +00001732EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
Daniel Borkmann79e88652014-11-19 17:13:11 +01001734/* Free an option memory block. Note, we actually want the inline
1735 * here as this allows gcc to detect the nullify and fold away the
1736 * condition entirely.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 */
Daniel Borkmann79e88652014-11-19 17:13:11 +01001738static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1739 const bool nullify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740{
David S. Millere53da5f2014-10-14 17:02:37 -04001741 if (WARN_ON_ONCE(!mem))
1742 return;
Daniel Borkmann79e88652014-11-19 17:13:11 +01001743 if (nullify)
1744 kzfree(mem);
1745 else
1746 kfree(mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 atomic_sub(size, &sk->sk_omem_alloc);
1748}
Daniel Borkmann79e88652014-11-19 17:13:11 +01001749
1750void sock_kfree_s(struct sock *sk, void *mem, int size)
1751{
1752 __sock_kfree_s(sk, mem, size, false);
1753}
Eric Dumazet2a915252009-05-27 11:30:05 +00001754EXPORT_SYMBOL(sock_kfree_s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755
Daniel Borkmann79e88652014-11-19 17:13:11 +01001756void sock_kzfree_s(struct sock *sk, void *mem, int size)
1757{
1758 __sock_kfree_s(sk, mem, size, true);
1759}
1760EXPORT_SYMBOL(sock_kzfree_s);
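/*
 * A minimal usage sketch, assuming option data of @optlen bytes (names are
 * illustrative): allocations and frees must be paired with the same size so
 * that sk_omem_alloc balances out.
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	... use opt ...
 *	sock_kfree_s(sk, opt, optlen);
 *
 * For key material or other sensitive data, sock_kzfree_s() clears the buffer
 * before freeing it.
 */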
1761
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 1763 I think these locks should be removed for datagram sockets.
1764 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001765static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766{
1767 DEFINE_WAIT(wait);
1768
1769 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1770 for (;;) {
1771 if (!timeo)
1772 break;
1773 if (signal_pending(current))
1774 break;
1775 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001776 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1778 break;
1779 if (sk->sk_shutdown & SEND_SHUTDOWN)
1780 break;
1781 if (sk->sk_err)
1782 break;
1783 timeo = schedule_timeout(timeo);
1784 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001785 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 return timeo;
1787}
1788
1789
1790/*
1791 * Generic send/receive buffer handlers
1792 */
1793
Herbert Xu4cc7f682009-02-04 16:55:54 -08001794struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1795 unsigned long data_len, int noblock,
Eric Dumazet28d64272013-08-08 14:38:47 -07001796 int *errcode, int max_page_order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797{
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001798 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 long timeo;
1800 int err;
1801
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 timeo = sock_sndtimeo(sk, noblock);
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001803 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 err = sock_error(sk);
1805 if (err != 0)
1806 goto failure;
1807
1808 err = -EPIPE;
1809 if (sk->sk_shutdown & SEND_SHUTDOWN)
1810 goto failure;
1811
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001812 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1813 break;
Eric Dumazet28d64272013-08-08 14:38:47 -07001814
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001815 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1816 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1817 err = -EAGAIN;
1818 if (!timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 goto failure;
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001820 if (signal_pending(current))
1821 goto interrupted;
1822 timeo = sock_wait_for_wmem(sk, timeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 }
Eric Dumazet2e4e4412014-09-17 04:49:49 -07001824 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1825 errcode, sk->sk_allocation);
1826 if (skb)
1827 skb_set_owner_w(skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 return skb;
1829
1830interrupted:
1831 err = sock_intr_errno(timeo);
1832failure:
1833 *errcode = err;
1834 return NULL;
1835}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001836EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001838struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 int noblock, int *errcode)
1840{
Eric Dumazet28d64272013-08-08 14:38:47 -07001841 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842}
Eric Dumazet2a915252009-05-27 11:30:05 +00001843EXPORT_SYMBOL(sock_alloc_send_skb);
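/*
 * A minimal usage sketch, assuming a datagram sendmsg path (hlen, len and err
 * are illustrative): the call blocks up to the socket send timeout unless
 * MSG_DONTWAIT is set.
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 */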
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
Eric Dumazet5640f762012-09-23 23:04:42 +00001845/* On 32bit arches, an skb frag is limited to 2^15 */
1846#define SKB_FRAG_PAGE_ORDER get_order(32768)
1847
Eric Dumazet400dfd32013-10-17 16:27:07 -07001848/**
1849 * skb_page_frag_refill - check that a page_frag contains enough room
1850 * @sz: minimum size of the fragment we want to get
1851 * @pfrag: pointer to page_frag
Eric Dumazet82d5e2b2014-09-08 04:00:00 -07001852 * @gfp: priority for memory allocation
Eric Dumazet400dfd32013-10-17 16:27:07 -07001853 *
1854 * Note: While this allocator tries to use high order pages, there is
1855 * no guarantee that allocations succeed. Therefore, @sz MUST be
 1856 * less than or equal to PAGE_SIZE.
1857 */
Eric Dumazetd9b29382014-08-27 20:49:34 -07001858bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
Eric Dumazet5640f762012-09-23 23:04:42 +00001859{
Eric Dumazet5640f762012-09-23 23:04:42 +00001860 if (pfrag->page) {
1861 if (atomic_read(&pfrag->page->_count) == 1) {
1862 pfrag->offset = 0;
1863 return true;
1864 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001865 if (pfrag->offset + sz <= pfrag->size)
Eric Dumazet5640f762012-09-23 23:04:42 +00001866 return true;
1867 put_page(pfrag->page);
1868 }
1869
Eric Dumazetd9b29382014-08-27 20:49:34 -07001870 pfrag->offset = 0;
1871 if (SKB_FRAG_PAGE_ORDER) {
1872 pfrag->page = alloc_pages(gfp | __GFP_COMP |
1873 __GFP_NOWARN | __GFP_NORETRY,
1874 SKB_FRAG_PAGE_ORDER);
Eric Dumazet5640f762012-09-23 23:04:42 +00001875 if (likely(pfrag->page)) {
Eric Dumazetd9b29382014-08-27 20:49:34 -07001876 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
Eric Dumazet5640f762012-09-23 23:04:42 +00001877 return true;
1878 }
Eric Dumazetd9b29382014-08-27 20:49:34 -07001879 }
1880 pfrag->page = alloc_page(gfp);
1881 if (likely(pfrag->page)) {
1882 pfrag->size = PAGE_SIZE;
1883 return true;
1884 }
Eric Dumazet400dfd32013-10-17 16:27:07 -07001885 return false;
1886}
1887EXPORT_SYMBOL(skb_page_frag_refill);
1888
1889bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1890{
1891 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1892 return true;
1893
Eric Dumazet5640f762012-09-23 23:04:42 +00001894 sk_enter_memory_pressure(sk);
1895 sk_stream_moderate_sndbuf(sk);
1896 return false;
1897}
1898EXPORT_SYMBOL(sk_page_frag_refill);
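/*
 * A minimal usage sketch of the per-socket page_frag, assuming a stream
 * sendmsg loop (illustrative): sk_page_frag() picks the per-task or per-socket
 * fragment depending on sk_allocation.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min(copy, pfrag->size - pfrag->offset);
 *	... copy into pfrag->page at pfrag->offset, then ...
 *	pfrag->offset += copy;
 */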
1899
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001901 __releases(&sk->sk_lock.slock)
1902 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903{
1904 DEFINE_WAIT(wait);
1905
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001906 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1908 TASK_UNINTERRUPTIBLE);
1909 spin_unlock_bh(&sk->sk_lock.slock);
1910 schedule();
1911 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001912 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 break;
1914 }
1915 finish_wait(&sk->sk_lock.wq, &wait);
1916}
1917
1918static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001919 __releases(&sk->sk_lock.slock)
1920 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921{
1922 struct sk_buff *skb = sk->sk_backlog.head;
1923
1924 do {
1925 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1926 bh_unlock_sock(sk);
1927
1928 do {
1929 struct sk_buff *next = skb->next;
1930
Eric Dumazete4cbb022012-04-30 16:07:09 +00001931 prefetch(next);
Eric Dumazet7fee2262010-05-11 23:19:48 +00001932 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001934 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
1936 /*
1937 * We are in process context here with softirqs
1938 * disabled, use cond_resched_softirq() to preempt.
1939 * This is safe to do because we've taken the backlog
1940 * queue private:
1941 */
1942 cond_resched_softirq();
1943
1944 skb = next;
1945 } while (skb != NULL);
1946
1947 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001948 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001949
1950 /*
 1951 * Doing the zeroing here guarantees we cannot loop forever
1952 * while a wild producer attempts to flood us.
1953 */
1954 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955}
1956
1957/**
1958 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001959 * @sk: sock to wait on
1960 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 *
 1962 * Now the socket state, including sk->sk_err, is changed only under the lock,
 1963 * hence we may omit checks after joining the wait queue.
 1964 * We check the receive queue before schedule() only as an optimization;
 1965 * it is very likely that release_sock() added new data.
1966 */
1967int sk_wait_data(struct sock *sk, long *timeo)
1968{
1969 int rc;
1970 DEFINE_WAIT(wait);
1971
Eric Dumazetaa395142010-04-20 13:03:51 +00001972 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1974 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1975 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001976 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 return rc;
1978}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979EXPORT_SYMBOL(sk_wait_data);
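/*
 * A minimal usage sketch, assuming a recvmsg path that already holds the
 * socket lock (illustrative): the remaining timeout is updated in place.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		sk_wait_data(sk, &timeo);
 *	}
 */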
1980
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001981/**
1982 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1983 * @sk: socket
1984 * @size: memory size to allocate
1985 * @kind: allocation type
1986 *
1987 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1988 * rmem allocation. This function assumes that protocols which have
1989 * memory_pressure use sk_wmem_queued as write buffer accounting.
1990 */
1991int __sk_mem_schedule(struct sock *sk, int size, int kind)
1992{
1993 struct proto *prot = sk->sk_prot;
1994 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001995 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001996 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001997
1998 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001999
Glauber Costae1aab162011-12-11 21:47:03 +00002000 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002001
2002 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00002003 if (parent_status == UNDER_LIMIT &&
2004 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00002005 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002006 return 1;
2007 }
2008
Glauber Costae1aab162011-12-11 21:47:03 +00002009 /* Under pressure. (we or our parents) */
2010 if ((parent_status > SOFT_LIMIT) ||
2011 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00002012 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002013
Glauber Costae1aab162011-12-11 21:47:03 +00002014 /* Over hard limit (we or our parents) */
2015 if ((parent_status == OVER_LIMIT) ||
2016 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002017 goto suppress_allocation;
2018
2019 /* guarantee minimum buffer size under pressure */
2020 if (kind == SK_MEM_RECV) {
2021 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2022 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002023
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002024 } else { /* SK_MEM_SEND */
2025 if (sk->sk_type == SOCK_STREAM) {
2026 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2027 return 1;
2028 } else if (atomic_read(&sk->sk_wmem_alloc) <
2029 prot->sysctl_wmem[0])
2030 return 1;
2031 }
2032
Glauber Costa180d8cd2011-12-11 21:47:02 +00002033 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08002034 int alloc;
2035
Glauber Costa180d8cd2011-12-11 21:47:02 +00002036 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08002037 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002038 alloc = sk_sockets_allocated_read_positive(sk);
2039 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002040 sk_mem_pages(sk->sk_wmem_queued +
2041 atomic_read(&sk->sk_rmem_alloc) +
2042 sk->sk_forward_alloc))
2043 return 1;
2044 }
2045
2046suppress_allocation:
2047
2048 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2049 sk_stream_moderate_sndbuf(sk);
2050
2051 /* Fail only if socket is _under_ its sndbuf.
 2052 * In this case we cannot block, so we have to fail.
2053 */
2054 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2055 return 1;
2056 }
2057
Satoru Moriya3847ce32011-06-17 12:00:03 +00002058 trace_sock_exceed_buf_limit(sk, prot, allocated);
2059
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002060 /* Alas. Undo changes. */
2061 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00002062
Glauber Costa0e90b312012-01-20 04:57:16 +00002063 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00002064
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002065 return 0;
2066}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002067EXPORT_SYMBOL(__sk_mem_schedule);
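/*
 * A worked example of the accounting above, assuming 4 KiB pages so that
 * SK_MEM_QUANTUM == 4096 (numbers are illustrative): charging size = 3000
 * bytes gives amt = sk_mem_pages(3000) = 1, so sk_forward_alloc grows by 4096
 * and memory_allocated grows by one page on success. Protocols normally reach
 * this through the sk_wmem_schedule()/sk_rmem_schedule() helpers, which only
 * call it when sk_forward_alloc cannot already cover the request.
 */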
2068
2069/**
 2070 * __sk_mem_reclaim - reclaim memory_allocated
2071 * @sk: socket
2072 */
2073void __sk_mem_reclaim(struct sock *sk)
2074{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002075 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00002076 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002077 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2078
Glauber Costa180d8cd2011-12-11 21:47:02 +00002079 if (sk_under_memory_pressure(sk) &&
2080 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2081 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002082}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002083EXPORT_SYMBOL(__sk_mem_reclaim);
2084
2085
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086/*
2087 * Set of default routines for initialising struct proto_ops when
2088 * the protocol does not support a particular function. In certain
2089 * cases where it makes no sense for a protocol to have a "do nothing"
2090 * function, some default processing is provided.
2091 */
2092
2093int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2094{
2095 return -EOPNOTSUPP;
2096}
Eric Dumazet2a915252009-05-27 11:30:05 +00002097EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002099int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 int len, int flags)
2101{
2102 return -EOPNOTSUPP;
2103}
Eric Dumazet2a915252009-05-27 11:30:05 +00002104EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105
2106int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2107{
2108 return -EOPNOTSUPP;
2109}
Eric Dumazet2a915252009-05-27 11:30:05 +00002110EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
2112int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2113{
2114 return -EOPNOTSUPP;
2115}
Eric Dumazet2a915252009-05-27 11:30:05 +00002116EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002118int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 int *len, int peer)
2120{
2121 return -EOPNOTSUPP;
2122}
Eric Dumazet2a915252009-05-27 11:30:05 +00002123EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
Eric Dumazet2a915252009-05-27 11:30:05 +00002125unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126{
2127 return 0;
2128}
Eric Dumazet2a915252009-05-27 11:30:05 +00002129EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2132{
2133 return -EOPNOTSUPP;
2134}
Eric Dumazet2a915252009-05-27 11:30:05 +00002135EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136
2137int sock_no_listen(struct socket *sock, int backlog)
2138{
2139 return -EOPNOTSUPP;
2140}
Eric Dumazet2a915252009-05-27 11:30:05 +00002141EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
2143int sock_no_shutdown(struct socket *sock, int how)
2144{
2145 return -EOPNOTSUPP;
2146}
Eric Dumazet2a915252009-05-27 11:30:05 +00002147EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
2149int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002150 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151{
2152 return -EOPNOTSUPP;
2153}
Eric Dumazet2a915252009-05-27 11:30:05 +00002154EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156int sock_no_getsockopt(struct socket *sock, int level, int optname,
2157 char __user *optval, int __user *optlen)
2158{
2159 return -EOPNOTSUPP;
2160}
Eric Dumazet2a915252009-05-27 11:30:05 +00002161EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162
2163int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2164 size_t len)
2165{
2166 return -EOPNOTSUPP;
2167}
Eric Dumazet2a915252009-05-27 11:30:05 +00002168EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169
2170int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2171 size_t len, int flags)
2172{
2173 return -EOPNOTSUPP;
2174}
Eric Dumazet2a915252009-05-27 11:30:05 +00002175EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
2177int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2178{
2179 /* Mirror missing mmap method error code */
2180 return -ENODEV;
2181}
Eric Dumazet2a915252009-05-27 11:30:05 +00002182EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
2184ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2185{
2186 ssize_t res;
2187 struct msghdr msg = {.msg_flags = flags};
2188 struct kvec iov;
2189 char *kaddr = kmap(page);
2190 iov.iov_base = kaddr + offset;
2191 iov.iov_len = size;
2192 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2193 kunmap(page);
2194 return res;
2195}
Eric Dumazet2a915252009-05-27 11:30:05 +00002196EXPORT_SYMBOL(sock_no_sendpage);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197
2198/*
2199 * Default Socket Callbacks
2200 */
2201
2202static void sock_def_wakeup(struct sock *sk)
2203{
Eric Dumazet43815482010-04-29 11:01:49 +00002204 struct socket_wq *wq;
2205
2206 rcu_read_lock();
2207 wq = rcu_dereference(sk->sk_wq);
2208 if (wq_has_sleeper(wq))
2209 wake_up_interruptible_all(&wq->wait);
2210 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211}
2212
2213static void sock_def_error_report(struct sock *sk)
2214{
Eric Dumazet43815482010-04-29 11:01:49 +00002215 struct socket_wq *wq;
2216
2217 rcu_read_lock();
2218 wq = rcu_dereference(sk->sk_wq);
2219 if (wq_has_sleeper(wq))
2220 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002221 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00002222 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223}
2224
David S. Miller676d2362014-04-11 16:15:36 -04002225static void sock_def_readable(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226{
Eric Dumazet43815482010-04-29 11:01:49 +00002227 struct socket_wq *wq;
2228
2229 rcu_read_lock();
2230 wq = rcu_dereference(sk->sk_wq);
2231 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002232 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002233 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002234 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002235 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236}
2237
2238static void sock_def_write_space(struct sock *sk)
2239{
Eric Dumazet43815482010-04-29 11:01:49 +00002240 struct socket_wq *wq;
2241
2242 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
2244 /* Do not wake up a writer until he can make "significant"
2245 * progress. --DaveM
2246 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002247 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002248 wq = rcu_dereference(sk->sk_wq);
2249 if (wq_has_sleeper(wq))
2250 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002251 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252
2253 /* Should agree with poll, otherwise some programs break */
2254 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002255 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 }
2257
Eric Dumazet43815482010-04-29 11:01:49 +00002258 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259}
2260
2261static void sock_def_destruct(struct sock *sk)
2262{
Jesper Juhla51482b2005-11-08 09:41:34 -08002263 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264}
2265
2266void sk_send_sigurg(struct sock *sk)
2267{
2268 if (sk->sk_socket && sk->sk_socket->file)
2269 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002270 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271}
Eric Dumazet2a915252009-05-27 11:30:05 +00002272EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
2274void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2275 unsigned long expires)
2276{
2277 if (!mod_timer(timer, expires))
2278 sock_hold(sk);
2279}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280EXPORT_SYMBOL(sk_reset_timer);
2281
2282void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2283{
Ying Xue25cc4ae2013-02-03 20:32:57 +00002284 if (del_timer(timer))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 __sock_put(sk);
2286}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287EXPORT_SYMBOL(sk_stop_timer);
2288
2289void sock_init_data(struct socket *sock, struct sock *sk)
2290{
2291 skb_queue_head_init(&sk->sk_receive_queue);
2292 skb_queue_head_init(&sk->sk_write_queue);
2293 skb_queue_head_init(&sk->sk_error_queue);
2294
2295 sk->sk_send_head = NULL;
2296
2297 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002298
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 sk->sk_allocation = GFP_KERNEL;
2300 sk->sk_rcvbuf = sysctl_rmem_default;
2301 sk->sk_sndbuf = sysctl_wmem_default;
2302 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002303 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304
2305 sock_set_flag(sk, SOCK_ZAPPED);
2306
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002307 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002309 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 sock->sk = sk;
2311 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002312 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
Eric Dumazetb6c67122010-04-08 23:03:29 +00002314 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002316 lockdep_set_class_and_name(&sk->sk_callback_lock,
2317 af_callback_keys + sk->sk_family,
2318 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
2320 sk->sk_state_change = sock_def_wakeup;
2321 sk->sk_data_ready = sock_def_readable;
2322 sk->sk_write_space = sock_def_write_space;
2323 sk->sk_error_report = sock_def_error_report;
2324 sk->sk_destruct = sock_def_destruct;
2325
Eric Dumazet5640f762012-09-23 23:04:42 +00002326 sk->sk_frag.page = NULL;
2327 sk->sk_frag.offset = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002328 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002330 sk->sk_peer_pid = NULL;
2331 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 sk->sk_write_pending = 0;
2333 sk->sk_rcvlowat = 1;
2334 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2335 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2336
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002337 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
Cong Wange0d10952013-08-01 11:10:25 +08002339#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir06021292013-06-10 11:39:50 +03002340 sk->sk_napi_id = 0;
Eliezer Tamir64b0dc52013-07-10 17:13:36 +03002341 sk->sk_ll_usec = sysctl_net_busy_read;
Eliezer Tamir06021292013-06-10 11:39:50 +03002342#endif
2343
Eric Dumazet62748f32013-09-24 08:20:52 -07002344 sk->sk_max_pacing_rate = ~0U;
Eric Dumazet7eec4172013-10-08 15:16:00 -07002345 sk->sk_pacing_rate = ~0U;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002346 /*
2347 * Before updating sk_refcnt, we must commit prior changes to memory
2348 * (Documentation/RCU/rculist_nulls.txt for details)
2349 */
2350 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002352 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353}
Eric Dumazet2a915252009-05-27 11:30:05 +00002354EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002356void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357{
2358 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002359 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002360 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002362 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002363 spin_unlock(&sk->sk_lock.slock);
2364 /*
2365 * The sk_lock has mutex_lock() semantics here:
2366 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002367 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002368 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002370EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002372void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002374 /*
2375 * The sk_lock has mutex_unlock() semantics:
2376 */
2377 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2378
2379 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 if (sk->sk_backlog.tail)
2381 __release_sock(sk);
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002382
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002383 /* Warning : release_cb() might need to release sk ownership,
2384 * ie call sock_release_ownership(sk) before us.
2385 */
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002386 if (sk->sk_prot->release_cb)
2387 sk->sk_prot->release_cb(sk);
2388
Eric Dumazetc3f9b012014-03-10 09:50:11 -07002389 sock_release_ownership(sk);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002390 if (waitqueue_active(&sk->sk_lock.wq))
2391 wake_up(&sk->sk_lock.wq);
2392 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393}
2394EXPORT_SYMBOL(release_sock);
2395
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002396/**
2397 * lock_sock_fast - fast version of lock_sock
2398 * @sk: socket
2399 *
 2400 * This version should be used for very small sections, where the process won't block
2401 * return false if fast path is taken
2402 * sk_lock.slock locked, owned = 0, BH disabled
2403 * return true if slow path is taken
2404 * sk_lock.slock unlocked, owned = 1, BH enabled
2405 */
2406bool lock_sock_fast(struct sock *sk)
2407{
2408 might_sleep();
2409 spin_lock_bh(&sk->sk_lock.slock);
2410
2411 if (!sk->sk_lock.owned)
2412 /*
2413 * Note : We must disable BH
2414 */
2415 return false;
2416
2417 __lock_sock(sk);
2418 sk->sk_lock.owned = 1;
2419 spin_unlock(&sk->sk_lock.slock);
2420 /*
2421 * The sk_lock has mutex_lock() semantics here:
2422 */
2423 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2424 local_bh_enable();
2425 return true;
2426}
2427EXPORT_SYMBOL(lock_sock_fast);
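/*
 * A minimal usage sketch (illustrative): the return value must be handed back
 * to unlock_sock_fast() so that the matching unlock path is taken.
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 */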
2428
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002430{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002431 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002433 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002434 tv = ktime_to_timeval(sk->sk_stamp);
2435 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002437 if (tv.tv_sec == 0) {
2438 sk->sk_stamp = ktime_get_real();
2439 tv = ktime_to_timeval(sk->sk_stamp);
2440 }
2441 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002442}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443EXPORT_SYMBOL(sock_get_timestamp);
2444
Eric Dumazetae40eb12007-03-18 17:33:16 -07002445int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2446{
2447 struct timespec ts;
2448 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002449 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002450 ts = ktime_to_timespec(sk->sk_stamp);
2451 if (ts.tv_sec == -1)
2452 return -ENOENT;
2453 if (ts.tv_sec == 0) {
2454 sk->sk_stamp = ktime_get_real();
2455 ts = ktime_to_timespec(sk->sk_stamp);
2456 }
2457 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2458}
2459EXPORT_SYMBOL(sock_get_timestampns);
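/*
 * A minimal usage sketch, assuming a protocol ioctl handler (illustrative):
 * these two helpers typically back the SIOCGSTAMP and SIOCGSTAMPNS ioctls.
 *
 *	case SIOCGSTAMP:
 *		return sock_get_timestamp(sk, (struct timeval __user *)arg);
 *	case SIOCGSTAMPNS:
 *		return sock_get_timestampns(sk, (struct timespec __user *)arg);
 */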
2460
Patrick Ohly20d49472009-02-12 05:03:38 +00002461void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002462{
Patrick Ohly20d49472009-02-12 05:03:38 +00002463 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002464 unsigned long previous_flags = sk->sk_flags;
2465
Patrick Ohly20d49472009-02-12 05:03:38 +00002466 sock_set_flag(sk, flag);
2467 /*
2468 * we just set one of the two flags which require net
2469 * time stamping, but time stamping might have been on
2470 * already because of the other one
2471 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002472 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002473 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 }
2475}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
Richard Cochrancb820f82013-07-19 19:40:09 +02002477int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2478 int level, int type)
2479{
2480 struct sock_exterr_skb *serr;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002481 struct sk_buff *skb;
Richard Cochrancb820f82013-07-19 19:40:09 +02002482 int copied, err;
2483
2484 err = -EAGAIN;
Willem de Bruijn364a9e92014-08-31 21:30:27 -04002485 skb = sock_dequeue_err_skb(sk);
Richard Cochrancb820f82013-07-19 19:40:09 +02002486 if (skb == NULL)
2487 goto out;
2488
2489 copied = skb->len;
2490 if (copied > len) {
2491 msg->msg_flags |= MSG_TRUNC;
2492 copied = len;
2493 }
David S. Miller51f3d022014-11-05 16:46:40 -05002494 err = skb_copy_datagram_msg(skb, 0, msg, copied);
Richard Cochrancb820f82013-07-19 19:40:09 +02002495 if (err)
2496 goto out_free_skb;
2497
2498 sock_recv_timestamp(msg, sk, skb);
2499
2500 serr = SKB_EXT_ERR(skb);
2501 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2502
2503 msg->msg_flags |= MSG_ERRQUEUE;
2504 err = copied;
2505
Richard Cochrancb820f82013-07-19 19:40:09 +02002506out_free_skb:
2507 kfree_skb(skb);
2508out:
2509 return err;
2510}
2511EXPORT_SYMBOL(sock_recv_errqueue);
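/*
 * A minimal usage sketch, assuming a recvmsg implementation that handles
 * MSG_ERRQUEUE (the level/type values are illustrative and protocol specific):
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len,
 *					  SOL_PACKET, PACKET_TX_TIMESTAMP);
 */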
2512
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513/*
 2514 * Get a socket option on a socket.
 2515 *
 2516 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 2517 * asynchronous errors should be reported by getsockopt. We assume
 2518 * this means if you specify SO_ERROR (otherwise what's the point of it).
2519 */
2520int sock_common_getsockopt(struct socket *sock, int level, int optname,
2521 char __user *optval, int __user *optlen)
2522{
2523 struct sock *sk = sock->sk;
2524
2525 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2526}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527EXPORT_SYMBOL(sock_common_getsockopt);
2528
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002529#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002530int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2531 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002532{
2533 struct sock *sk = sock->sk;
2534
Johannes Berg1e51f952007-03-06 13:44:06 -08002535 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002536 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2537 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002538 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2539}
2540EXPORT_SYMBOL(compat_sock_common_getsockopt);
2541#endif
2542
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2544 struct msghdr *msg, size_t size, int flags)
2545{
2546 struct sock *sk = sock->sk;
2547 int addr_len = 0;
2548 int err;
2549
2550 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2551 flags & ~MSG_DONTWAIT, &addr_len);
2552 if (err >= 0)
2553 msg->msg_namelen = addr_len;
2554 return err;
2555}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556EXPORT_SYMBOL(sock_common_recvmsg);
2557
2558/*
2559 * Set socket options on an inet socket.
2560 */
2561int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002562 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563{
2564 struct sock *sk = sock->sk;
2565
2566 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2567}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568EXPORT_SYMBOL(sock_common_setsockopt);
2569
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002570#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002571int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002572 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002573{
2574 struct sock *sk = sock->sk;
2575
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002576 if (sk->sk_prot->compat_setsockopt != NULL)
2577 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2578 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002579 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2580}
2581EXPORT_SYMBOL(compat_sock_common_setsockopt);
2582#endif
2583
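/*
 * Illustrative note (an assumption about typical usage, not part of the
 * original file): the sock_common_* helpers above simply forward to the
 * underlying struct proto, so an address family that needs no extra
 * processing can point its proto_ops at them directly. The struct name
 * below is hypothetical:
 *
 *	static const struct proto_ops example_stream_ops = {
 *		...
 *		.setsockopt = sock_common_setsockopt,
 *		.getsockopt = sock_common_getsockopt,
 *		.recvmsg    = sock_common_recvmsg,
 *		...
 *	};
 */
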
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network stack still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove it from the hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because another CPU
	 * runs the receiver and did its hash table lookup before we unhashed
	 * the socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * We also still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

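/*
 * Usage sketch (illustrative, not from the original file): protocols with
 * no special teardown requirements can call this from their ->close or
 * ->destroy path once the socket is quiesced, e.g.:
 *
 *	static void example_close(struct sock *sk, long timeout)
 *	{
 *		sk_common_release(sk);
 *	}
 *
 * The final sock_put() above drops the reference that keeps the struct
 * sock itself alive.
 */
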
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

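/*
 * Usage sketch (illustrative, not from the original file): protocols are
 * expected to adjust the per-cpu counter from their hash/unhash paths, and
 * the aggregate is what the "sockets" column of /proc/net/protocols shows:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	// on hash
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	// on unhash
 *	nr = sock_prot_inuse_get(net, &example_prot);		// aggregate
 *
 * Because increments and decrements for one socket may land on different
 * CPUs, a racy per-cpu sum can transiently be negative, which is
 * presumably why sock_prot_inuse_get() clamps the result at 0.
 */
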
static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);

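/*
 * Registration sketch (illustrative, not from the original file): a
 * protocol module typically fills in a struct proto and registers it from
 * its init path, unregistering on exit. The names example_prot and
 * struct example_sock are hypothetical:
 *
 *	static struct proto example_prot = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_prot, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_prot);
 *	}
 *
 * Passing alloc_slab == 1 asks proto_register() to create the sock slab
 * cache (and the request_sock/timewait caches when rsk_prot/twsk_prot
 * are provided).
 */
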
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

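/*
 * For reference (an explanatory note, not part of the original file): each
 * /proc/net/protocols line produced above starts with
 *
 *	protocol  size  sockets  memory  press  maxhdr  slab  module
 *
 * where "sockets" comes from sock_prot_inuse_get(), "memory" from
 * sock_prot_memory_allocated() (-1 when the protocol does not track it),
 * and "press" from sock_prot_memory_pressure() ("NI" when not
 * implemented). The trailing y/n letters map one-to-one onto the struct
 * proto methods listed in proto_seq_printf().
 */
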
static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */