/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/jump_label.h>
#include <linux/memcontrol.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(cgrp, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(cgrp, ss);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(cgrp, ss);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct jump_label_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
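/*
 * Illustrative note, not part of the original source: with
 * _SK_MEM_PACKETS = 256 and _SK_MEM_OVERHEAD = SKB_TRUESIZE(256)
 * (256 bytes of payload plus the aligned sizes of struct sk_buff and
 * struct skb_shared_info), the defaults below come out on the order of
 * a couple of hundred kilobytes per socket on a typical 64-bit build;
 * the exact figure depends on the structure sizes.
 */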

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
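/*
 * Illustrative userspace sketch, not part of the original source: the
 * timeouts parsed above are supplied as a struct timeval, e.g.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * An all-zero timeval means "block forever" (MAX_SCHEDULE_TIMEOUT), and
 * a negative tv_sec is clamped to an immediate timeout, as handled above.
 */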

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm,  current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we dont leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);
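/*
 * Illustrative note, not part of the original source: sk_receive_skb()
 * shows the usual bottom-half receive pattern - when the socket is not
 * owned by a user context the skb is processed immediately through
 * sk_backlog_rcv(); otherwise it is queued on the (bounded) backlog via
 * sk_add_backlog() and drained later when the owner calls release_sock().
 */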

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
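/*
 * Illustrative userspace sketch, not part of the original source:
 * binding a socket to a device by name requires CAP_NET_RAW, e.g.
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *
 * Passing an empty name (or a zero option length) unbinds the socket,
 * as described in the comment inside sock_bindtodevice() above.
 */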

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
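/*
 * Illustrative note, not part of the original source: because SO_SNDBUF
 * and SO_RCVBUF are doubled on the way in (see the comments above) and
 * getsockopt() reports the value actually in use, a userspace sequence
 * such as
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *
 * is expected to read back roughly 131072, subject to the
 * sysctl_rmem_max cap and the SOCK_MIN_RCVBUF floor.
 */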


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
1046
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001047/*
1048 * Initialize an sk_lock.
1049 *
1050 * (We also register the sk_lock with the lock validator.)
1051 */
Dave Jonesb6f99a22007-03-22 12:27:49 -07001052static inline void sock_lock_init(struct sock *sk)
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001053{
Peter Zijlstraed075362006-12-06 20:35:24 -08001054 sock_lock_init_class_and_name(sk,
1055 af_family_slock_key_strings[sk->sk_family],
1056 af_family_slock_keys + sk->sk_family,
1057 af_family_key_strings[sk->sk_family],
1058 af_family_keys + sk->sk_family);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07001059}
1060
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001061/*
1062 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1063 * even temporarly, because of RCU lookups. sk_node should also be left as is.
Eric Dumazet68835ab2010-11-30 19:04:07 +00001064 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001065 */
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001066static void sock_copy(struct sock *nsk, const struct sock *osk)
1067{
1068#ifdef CONFIG_SECURITY_NETWORK
1069 void *sptr = nsk->sk_security;
1070#endif
Eric Dumazet68835ab2010-11-30 19:04:07 +00001071 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1072
1073 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1074 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1075
Pavel Emelyanovf1a6c4d2007-11-01 00:29:45 -07001076#ifdef CONFIG_SECURITY_NETWORK
1077 nsk->sk_security = sptr;
1078 security_sk_clone(osk, nsk);
1079#endif
1080}
1081
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001082/*
1083 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
1084 * un-modified. Special care is taken when initializing object to zero.
1085 */
1086static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1087{
1088 if (offsetof(struct sock, sk_node.next) != 0)
1089 memset(sk, 0, offsetof(struct sock, sk_node.next));
1090 memset(&sk->sk_node.pprev, 0,
1091 size - offsetof(struct sock, sk_node.pprev));
1092}
1093
1094void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1095{
1096 unsigned long nulls1, nulls2;
1097
1098 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1099 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1100 if (nulls1 > nulls2)
1101 swap(nulls1, nulls2);
1102
1103 if (nulls1 != 0)
1104 memset((char *)sk, 0, nulls1);
1105 memset((char *)sk + nulls1 + sizeof(void *), 0,
1106 nulls2 - nulls1 - sizeof(void *));
1107 memset((char *)sk + nulls2 + sizeof(void *), 0,
1108 size - nulls2 - sizeof(void *));
1109}
1110EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1111
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001112static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1113 int family)
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001114{
1115 struct sock *sk;
1116 struct kmem_cache *slab;
1117
1118 slab = prot->slab;
Eric Dumazete912b112009-07-08 19:36:05 +00001119 if (slab != NULL) {
1120 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1121 if (!sk)
1122 return sk;
1123 if (priority & __GFP_ZERO) {
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001124 if (prot->clear_sk)
1125 prot->clear_sk(sk, prot->obj_size);
1126 else
1127 sk_prot_clear_nulls(sk, prot->obj_size);
Eric Dumazete912b112009-07-08 19:36:05 +00001128 }
Octavian Purdilafcbdf092010-12-16 14:26:56 -08001129 } else
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001130 sk = kmalloc(prot->obj_size, priority);
1131
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001132 if (sk != NULL) {
Vegard Nossuma98b65a2009-02-26 14:46:57 +01001133 kmemcheck_annotate_bitfield(sk, flags);
1134
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001135 if (security_sk_alloc(sk, family, priority))
1136 goto out_free;
1137
1138 if (!try_module_get(prot->owner))
1139 goto out_free_sec;
Krishna Kumare022f0b2009-10-19 23:46:20 +00001140 sk_tx_queue_clear(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001141 }
1142
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001143 return sk;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001144
1145out_free_sec:
1146 security_sk_free(sk);
1147out_free:
1148 if (slab != NULL)
1149 kmem_cache_free(slab, sk);
1150 else
1151 kfree(sk);
1152 return NULL;
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001153}
1154
1155static void sk_prot_free(struct proto *prot, struct sock *sk)
1156{
1157 struct kmem_cache *slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001158 struct module *owner;
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001159
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001160 owner = prot->owner;
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001161 slab = prot->slab;
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001162
1163 security_sk_free(sk);
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001164 if (slab != NULL)
1165 kmem_cache_free(slab, sk);
1166 else
1167 kfree(sk);
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001168 module_put(owner);
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001169}
1170
Herbert Xuf8451722010-05-24 00:12:34 -07001171#ifdef CONFIG_CGROUPS
1172void sock_update_classid(struct sock *sk)
1173{
Paul E. McKenney11441822010-10-06 17:15:35 -07001174 u32 classid;
Herbert Xuf8451722010-05-24 00:12:34 -07001175
Paul E. McKenney11441822010-10-06 17:15:35 -07001176 rcu_read_lock(); /* doing current task, which cannot vanish. */
1177 classid = task_cls_classid(current);
1178 rcu_read_unlock();
Herbert Xuf8451722010-05-24 00:12:34 -07001179 if (classid && classid != sk->sk_classid)
1180 sk->sk_classid = classid;
1181}
Herbert Xu82862742010-05-24 00:14:10 -07001182EXPORT_SYMBOL(sock_update_classid);
Neil Horman5bc14212011-11-22 05:10:51 +00001183
1184void sock_update_netprioidx(struct sock *sk)
1185{
Neil Horman5bc14212011-11-22 05:10:51 +00001186 if (in_interrupt())
1187 return;
Neil Horman2b73bc62012-02-10 05:43:38 +00001188
1189 sk->sk_cgrp_prioidx = task_netprioidx(current);
Neil Horman5bc14212011-11-22 05:10:51 +00001190}
1191EXPORT_SYMBOL_GPL(sock_update_netprioidx);
Herbert Xuf8451722010-05-24 00:12:34 -07001192#endif
1193
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194/**
1195 * sk_alloc - All socket objects are allocated here
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001196 * @net: the applicable net namespace
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001197 * @family: protocol family
1198 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1199 * @prot: struct proto associated with this new sock instance
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 */
Eric W. Biederman1b8d7ae2007-10-08 23:24:22 -07001201struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07001202 struct proto *prot)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203{
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001204 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001206 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 if (sk) {
Pavel Emelyanov154adbc2007-11-01 00:38:43 -07001208 sk->sk_family = family;
1209 /*
1210 * See comment in struct sock definition to understand
1211 * why we need sk_prot_creator -acme
1212 */
1213 sk->sk_prot = sk->sk_prot_creator = prot;
1214 sock_lock_init(sk);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001215 sock_net_set(sk, get_net(net));
Jarek Poplawskid66ee052009-08-30 23:15:36 +00001216 atomic_set(&sk->sk_wmem_alloc, 1);
Herbert Xuf8451722010-05-24 00:12:34 -07001217
1218 sock_update_classid(sk);
Neil Horman5bc14212011-11-22 05:10:51 +00001219 sock_update_netprioidx(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 }
Frank Filza79af592005-09-27 15:23:38 -07001221
Pavel Emelyanov2e4afe72007-11-01 00:36:26 -07001222 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223}
Eric Dumazet2a915252009-05-27 11:30:05 +00001224EXPORT_SYMBOL(sk_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225
Eric Dumazet2b85a342009-06-11 02:55:43 -07001226static void __sk_free(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227{
1228 struct sk_filter *filter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229
1230 if (sk->sk_destruct)
1231 sk->sk_destruct(sk);
1232
Paul E. McKenneya898def2010-02-22 17:04:49 -08001233 filter = rcu_dereference_check(sk->sk_filter,
1234 atomic_read(&sk->sk_wmem_alloc) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 if (filter) {
Pavel Emelyanov309dd5f2007-10-17 21:21:51 -07001236 sk_filter_uncharge(sk, filter);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00001237 RCU_INIT_POINTER(sk->sk_filter, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 }
1239
Eric Dumazet08e29af2011-11-28 12:04:18 +00001240 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
1242 if (atomic_read(&sk->sk_omem_alloc))
1243 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
Harvey Harrison0dc47872008-03-05 20:47:47 -08001244 __func__, atomic_read(&sk->sk_omem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001246 if (sk->sk_peer_cred)
1247 put_cred(sk->sk_peer_cred);
1248 put_pid(sk->sk_peer_pid);
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001249 put_net(sock_net(sk));
Pavel Emelyanovc308c1b2007-11-01 00:33:50 -07001250 sk_prot_free(sk->sk_prot_creator, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251}
Eric Dumazet2b85a342009-06-11 02:55:43 -07001252
1253void sk_free(struct sock *sk)
1254{
1255 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001256 * We subtract one from sk_wmem_alloc and can know if
Eric Dumazet2b85a342009-06-11 02:55:43 -07001257 * some packets are still in some tx queue.
1258 * If not null, sock_wfree() will call __sk_free(sk) later
1259 */
1260 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1261 __sk_free(sk);
1262}
Eric Dumazet2a915252009-05-27 11:30:05 +00001263EXPORT_SYMBOL(sk_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
Denis V. Lunevedf02082008-02-29 11:18:32 -08001265/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001266 * Last sock_put should drop reference to sk->sk_net. It has already
1267 * been dropped in sk_change_net. Taking reference to stopping namespace
Denis V. Lunevedf02082008-02-29 11:18:32 -08001268 * is not an option.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001269 * Take reference to a socket to remove it from hash _alive_ and after that
Denis V. Lunevedf02082008-02-29 11:18:32 -08001270 * destroy it in the context of init_net.
1271 */
1272void sk_release_kernel(struct sock *sk)
1273{
1274 if (sk == NULL || sk->sk_socket == NULL)
1275 return;
1276
1277 sock_hold(sk);
1278 sock_release(sk->sk_socket);
Denis V. Lunev65a18ec2008-04-16 01:59:46 -07001279 release_net(sock_net(sk));
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001280 sock_net_set(sk, get_net(&init_net));
Denis V. Lunevedf02082008-02-29 11:18:32 -08001281 sock_put(sk);
1282}
David S. Miller45af1752008-02-29 11:33:19 -08001283EXPORT_SYMBOL(sk_release_kernel);
Denis V. Lunevedf02082008-02-29 11:18:32 -08001284
Stephen Rothwell475f1b52012-01-09 16:33:16 +11001285static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1286{
1287 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1288 sock_update_memcg(newsk);
1289}
1290
Eric Dumazete56c57d2011-11-08 17:07:07 -05001291/**
1292 * sk_clone_lock - clone a socket, and lock its clone
1293 * @sk: the socket to clone
1294 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1295 *
1296 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1297 */
1298struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001299{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001300 struct sock *newsk;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001301
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001302 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001303 if (newsk != NULL) {
1304 struct sk_filter *filter;
1305
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001306 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001307
1308 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001309 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001310 sk_node_init(&newsk->sk_node);
1311 sock_lock_init(newsk);
1312 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001313 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001314 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001315
1316 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001317 /*
1318 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1319 */
1320 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001321 atomic_set(&newsk->sk_omem_alloc, 0);
1322 skb_queue_head_init(&newsk->sk_receive_queue);
1323 skb_queue_head_init(&newsk->sk_write_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001324#ifdef CONFIG_NET_DMA
1325 skb_queue_head_init(&newsk->sk_async_wait_queue);
1326#endif
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001327
Eric Dumazetb6c67122010-04-08 23:03:29 +00001328 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001329 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001330 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1331 af_callback_keys + newsk->sk_family,
1332 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001333
1334 newsk->sk_dst_cache = NULL;
1335 newsk->sk_wmem_queued = 0;
1336 newsk->sk_forward_alloc = 0;
1337 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001338 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1339
1340 sock_reset_flag(newsk, SOCK_DONE);
1341 skb_queue_head_init(&newsk->sk_error_queue);
1342
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001343 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001344 if (filter != NULL)
1345 sk_filter_charge(newsk, filter);
1346
1347 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1348 /* It is still raw copy of parent, so invalidate
1349 * destructor and make plain sk_free() */
1350 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001351 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001352 sk_free(newsk);
1353 newsk = NULL;
1354 goto out;
1355 }
1356
1357 newsk->sk_err = 0;
1358 newsk->sk_priority = 0;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001359 /*
1360 * Before updating sk_refcnt, we must commit prior changes to memory
1361 * (Documentation/RCU/rculist_nulls.txt for details)
1362 */
1363 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001364 atomic_set(&newsk->sk_refcnt, 2);
1365
1366 /*
1367 * Increment the counter in the same struct proto as the master
1368 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1369 * is the same as sk->sk_prot->socks, as this field was copied
1370 * with memcpy).
1371 *
1372 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always incremented the
		 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
1376 */
1377 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001378 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001379 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001380
Glauber Costaf3f511e2012-01-05 20:16:39 +00001381 sk_update_clone(sk, newsk);
1382
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001383 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001384 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001385
Eric Dumazet08e29af2011-11-28 12:04:18 +00001386 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001387 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001388 }
1389out:
1390 return newsk;
1391}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001392EXPORT_SYMBOL_GPL(sk_clone_lock);
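/*
 * Usage sketch (illustrative only): a protocol's accept/clone path.
 * The example_ helper is hypothetical; GFP_ATOMIC is typical because
 * cloning usually happens in softirq context.
 *
 *	static struct sock *example_clone(const struct sock *listener)
 *	{
 *		struct sock *newsk = sk_clone_lock(listener, GFP_ATOMIC);
 *
 *		if (newsk) {
 *			(newsk is returned bh_lock_sock()ed with sk_refcnt == 2;
 *			 protocol specific state is set up here)
 *			bh_unlock_sock(newsk);
 *		}
 *		return newsk;
 *	}
 */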
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001393
Andi Kleen99580892007-04-20 17:12:43 -07001394void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1395{
1396 __sk_dst_set(sk, dst);
1397 sk->sk_route_caps = dst->dev->features;
1398 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001399 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001400 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001401 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001402 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001403 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001404 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001405 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001406 sk->sk_gso_max_size = dst->dev->gso_max_size;
1407 }
Andi Kleen99580892007-04-20 17:12:43 -07001408 }
1409}
1410EXPORT_SYMBOL_GPL(sk_setup_caps);
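/*
 * Usage sketch (illustrative only): after a routing decision the
 * protocol caches the route and derives the socket's offload
 * capabilities from the output device; rt stands for the result of an
 * ip_route_output_flow()-style lookup.
 *
 *	sk_setup_caps(sk, &rt->dst);
 *	if (sk_can_gso(sk))
 *		(large sends can be left to the GSO layer / device)
 */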
1411
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412void __init sk_init(void)
1413{
Jan Beulich44813742009-09-21 17:03:05 -07001414 if (totalram_pages <= 4096) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 sysctl_wmem_max = 32767;
1416 sysctl_rmem_max = 32767;
1417 sysctl_wmem_default = 32767;
1418 sysctl_rmem_default = 32767;
Jan Beulich44813742009-09-21 17:03:05 -07001419 } else if (totalram_pages >= 131072) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 sysctl_wmem_max = 131071;
1421 sysctl_rmem_max = 131071;
1422 }
1423}
1424
1425/*
1426 * Simple resource managers for sockets.
1427 */
1428
1429
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001430/*
1431 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 */
1433void sock_wfree(struct sk_buff *skb)
1434{
1435 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001436 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
Eric Dumazetd99927f2009-09-24 10:49:24 +00001438 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1439 /*
		 * Keep a reference on sk_wmem_alloc; it will be released
		 * after the sk_write_space() call.
1442 */
1443 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001445 len = 1;
1446 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001447 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001448 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1449 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001450 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001451 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001452 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453}
Eric Dumazet2a915252009-05-27 11:30:05 +00001454EXPORT_SYMBOL(sock_wfree);
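/*
 * sock_wfree() is normally installed as skb->destructor by
 * skb_set_owner_w() (include/net/sock.h), which charges skb->truesize
 * to sk_wmem_alloc; kfree_skb() later runs it to uncharge and wake any
 * writer.  A minimal sketch of the usual pattern:
 *
 *	skb = alloc_skb(len, GFP_KERNEL);
 *	if (skb)
 *		skb_set_owner_w(skb, sk);
 */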
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001456/*
1457 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 */
1459void sock_rfree(struct sk_buff *skb)
1460{
1461 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001462 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Eric Dumazetd361fd52010-07-10 22:45:17 +00001464 atomic_sub(len, &sk->sk_rmem_alloc);
1465 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466}
Eric Dumazet2a915252009-05-27 11:30:05 +00001467EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
1469
1470int sock_i_uid(struct sock *sk)
1471{
1472 int uid;
1473
Eric Dumazetf064af12010-09-22 12:43:39 +00001474 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001476 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 return uid;
1478}
Eric Dumazet2a915252009-05-27 11:30:05 +00001479EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480
1481unsigned long sock_i_ino(struct sock *sk)
1482{
1483 unsigned long ino;
1484
Eric Dumazetf064af12010-09-22 12:43:39 +00001485 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001487 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 return ino;
1489}
Eric Dumazet2a915252009-05-27 11:30:05 +00001490EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491
1492/*
1493 * Allocate a skb from the socket's send buffer.
1494 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001495struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001496 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497{
1498 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001499 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 if (skb) {
1501 skb_set_owner_w(skb, sk);
1502 return skb;
1503 }
1504 }
1505 return NULL;
1506}
Eric Dumazet2a915252009-05-27 11:30:05 +00001507EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
1509/*
1510 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001511 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001512struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001513 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514{
1515 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1516 struct sk_buff *skb = alloc_skb(size, priority);
1517 if (skb) {
1518 skb_set_owner_r(skb, sk);
1519 return skb;
1520 }
1521 }
1522 return NULL;
1523}
1524
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001525/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001527 */
Al Virodd0fc662005-10-07 07:46:04 +01001528void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529{
1530 if ((unsigned)size <= sysctl_optmem_max &&
1531 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1532 void *mem;
1533 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001534 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 */
1536 atomic_add(size, &sk->sk_omem_alloc);
1537 mem = kmalloc(size, priority);
1538 if (mem)
1539 return mem;
1540 atomic_sub(size, &sk->sk_omem_alloc);
1541 }
1542 return NULL;
1543}
Eric Dumazet2a915252009-05-27 11:30:05 +00001544EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545
1546/*
1547 * Free an option memory block.
1548 */
1549void sock_kfree_s(struct sock *sk, void *mem, int size)
1550{
1551 kfree(mem);
1552 atomic_sub(size, &sk->sk_omem_alloc);
1553}
Eric Dumazet2a915252009-05-27 11:30:05 +00001554EXPORT_SYMBOL(sock_kfree_s);
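/*
 * Usage sketch (illustrative only): option memory must be released
 * with sock_kfree_s() using the same size, so that sk_omem_alloc stays
 * balanced; "blob" and its size are hypothetical.
 *
 *	blob = sock_kmalloc(sk, size, GFP_KERNEL);
 *	if (!blob)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, blob, size);
 */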
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
1556/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
1558 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001559static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560{
1561 DEFINE_WAIT(wait);
1562
1563 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1564 for (;;) {
1565 if (!timeo)
1566 break;
1567 if (signal_pending(current))
1568 break;
1569 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001570 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1572 break;
1573 if (sk->sk_shutdown & SEND_SHUTDOWN)
1574 break;
1575 if (sk->sk_err)
1576 break;
1577 timeo = schedule_timeout(timeo);
1578 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001579 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 return timeo;
1581}
1582
1583
1584/*
1585 * Generic send/receive buffer handlers
1586 */
1587
Herbert Xu4cc7f682009-02-04 16:55:54 -08001588struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1589 unsigned long data_len, int noblock,
1590 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
1592 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001593 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 long timeo;
1595 int err;
1596
1597 gfp_mask = sk->sk_allocation;
1598 if (gfp_mask & __GFP_WAIT)
1599 gfp_mask |= __GFP_REPEAT;
1600
1601 timeo = sock_sndtimeo(sk, noblock);
1602 while (1) {
1603 err = sock_error(sk);
1604 if (err != 0)
1605 goto failure;
1606
1607 err = -EPIPE;
1608 if (sk->sk_shutdown & SEND_SHUTDOWN)
1609 goto failure;
1610
1611 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001612 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 if (skb) {
1614 int npages;
1615 int i;
1616
1617 /* No pages, we're done... */
1618 if (!data_len)
1619 break;
1620
1621 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1622 skb->truesize += data_len;
1623 skb_shinfo(skb)->nr_frags = npages;
1624 for (i = 0; i < npages; i++) {
1625 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
1627 page = alloc_pages(sk->sk_allocation, 0);
1628 if (!page) {
1629 err = -ENOBUFS;
1630 skb_shinfo(skb)->nr_frags = i;
1631 kfree_skb(skb);
1632 goto failure;
1633 }
1634
Ian Campbellea2ab692011-08-22 23:44:58 +00001635 __skb_fill_page_desc(skb, i,
1636 page, 0,
1637 (data_len >= PAGE_SIZE ?
1638 PAGE_SIZE :
1639 data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 data_len -= PAGE_SIZE;
1641 }
1642
1643 /* Full success... */
1644 break;
1645 }
1646 err = -ENOBUFS;
1647 goto failure;
1648 }
1649 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1650 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1651 err = -EAGAIN;
1652 if (!timeo)
1653 goto failure;
1654 if (signal_pending(current))
1655 goto interrupted;
1656 timeo = sock_wait_for_wmem(sk, timeo);
1657 }
1658
1659 skb_set_owner_w(skb, sk);
1660 return skb;
1661
1662interrupted:
1663 err = sock_intr_errno(timeo);
1664failure:
1665 *errcode = err;
1666 return NULL;
1667}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001668EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001670struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 int noblock, int *errcode)
1672{
1673 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1674}
Eric Dumazet2a915252009-05-27 11:30:05 +00001675EXPORT_SYMBOL(sock_alloc_send_skb);
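/*
 * Usage sketch (illustrative only): a typical datagram sendmsg()
 * implementation blocks on the send buffer via sock_alloc_send_skb();
 * the noblock argument usually comes from MSG_DONTWAIT and the error
 * code (-EAGAIN, -EPIPE, a pending signal, ...) is returned in err.
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out_err;
 */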
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
1677static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001678 __releases(&sk->sk_lock.slock)
1679 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680{
1681 DEFINE_WAIT(wait);
1682
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001683 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1685 TASK_UNINTERRUPTIBLE);
1686 spin_unlock_bh(&sk->sk_lock.slock);
1687 schedule();
1688 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001689 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 break;
1691 }
1692 finish_wait(&sk->sk_lock.wq, &wait);
1693}
1694
1695static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001696 __releases(&sk->sk_lock.slock)
1697 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698{
1699 struct sk_buff *skb = sk->sk_backlog.head;
1700
1701 do {
1702 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1703 bh_unlock_sock(sk);
1704
1705 do {
1706 struct sk_buff *next = skb->next;
1707
Eric Dumazet7fee2262010-05-11 23:19:48 +00001708 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001710 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711
1712 /*
1713 * We are in process context here with softirqs
1714 * disabled, use cond_resched_softirq() to preempt.
1715 * This is safe to do because we've taken the backlog
1716 * queue private:
1717 */
1718 cond_resched_softirq();
1719
1720 skb = next;
1721 } while (skb != NULL);
1722
1723 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001724 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001725
1726 /*
	 * Doing the zeroing here guarantees we cannot loop forever
1728 * while a wild producer attempts to flood us.
1729 */
1730 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731}
1732
1733/**
1734 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001735 * @sk: sock to wait on
1736 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 *
1738 * Now socket state including sk->sk_err is changed only under lock,
1739 * hence we may omit checks after joining wait queue.
1740 * We check receive queue before schedule() only as optimization;
1741 * it is very likely that release_sock() added new data.
1742 */
1743int sk_wait_data(struct sock *sk, long *timeo)
1744{
1745 int rc;
1746 DEFINE_WAIT(wait);
1747
Eric Dumazetaa395142010-04-20 13:03:51 +00001748 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1750 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1751 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001752 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 return rc;
1754}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755EXPORT_SYMBOL(sk_wait_data);
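/*
 * Usage sketch (illustrative only): a blocking recvmsg() path loops on
 * sk_wait_data() with the socket lock held; the timeout comes from
 * sock_rcvtimeo() and is updated in place.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 */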
1756
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001757/**
1758 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1759 * @sk: socket
1760 * @size: memory size to allocate
1761 * @kind: allocation type
1762 *
1763 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1764 * rmem allocation. This function assumes that protocols which have
1765 * memory_pressure use sk_wmem_queued as write buffer accounting.
1766 */
1767int __sk_mem_schedule(struct sock *sk, int size, int kind)
1768{
1769 struct proto *prot = sk->sk_prot;
1770 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001771 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001772 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001773
1774 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001775
Glauber Costae1aab162011-12-11 21:47:03 +00001776 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001777
1778 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001779 if (parent_status == UNDER_LIMIT &&
1780 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001781 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001782 return 1;
1783 }
1784
Glauber Costae1aab162011-12-11 21:47:03 +00001785 /* Under pressure. (we or our parents) */
1786 if ((parent_status > SOFT_LIMIT) ||
1787 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001788 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001789
Glauber Costae1aab162011-12-11 21:47:03 +00001790 /* Over hard limit (we or our parents) */
1791 if ((parent_status == OVER_LIMIT) ||
1792 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001793 goto suppress_allocation;
1794
1795 /* guarantee minimum buffer size under pressure */
1796 if (kind == SK_MEM_RECV) {
1797 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1798 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001799
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001800 } else { /* SK_MEM_SEND */
1801 if (sk->sk_type == SOCK_STREAM) {
1802 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1803 return 1;
1804 } else if (atomic_read(&sk->sk_wmem_alloc) <
1805 prot->sysctl_wmem[0])
1806 return 1;
1807 }
1808
Glauber Costa180d8cd2011-12-11 21:47:02 +00001809 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08001810 int alloc;
1811
Glauber Costa180d8cd2011-12-11 21:47:02 +00001812 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08001813 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001814 alloc = sk_sockets_allocated_read_positive(sk);
1815 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001816 sk_mem_pages(sk->sk_wmem_queued +
1817 atomic_read(&sk->sk_rmem_alloc) +
1818 sk->sk_forward_alloc))
1819 return 1;
1820 }
1821
1822suppress_allocation:
1823
1824 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1825 sk_stream_moderate_sndbuf(sk);
1826
1827 /* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
1829 */
1830 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1831 return 1;
1832 }
1833
Satoru Moriya3847ce32011-06-17 12:00:03 +00001834 trace_sock_exceed_buf_limit(sk, prot, allocated);
1835
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001836 /* Alas. Undo changes. */
1837 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001838
Glauber Costa0e90b312012-01-20 04:57:16 +00001839 sk_memory_allocated_sub(sk, amt);
Glauber Costa180d8cd2011-12-11 21:47:02 +00001840
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001841 return 0;
1842}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001843EXPORT_SYMBOL(__sk_mem_schedule);
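/*
 * Usage sketch (illustrative only): protocols normally go through the
 * sk_wmem_schedule()/sk_rmem_schedule() wrappers in include/net/sock.h,
 * which call __sk_mem_schedule() only when sk_forward_alloc is too low.
 *
 *	if (!sk_rmem_schedule(sk, skb->truesize))
 *		goto drop;
 *	skb_set_owner_r(skb, sk);
 */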
1844
1845/**
 * __sk_mem_reclaim - reclaim memory_allocated
1847 * @sk: socket
1848 */
1849void __sk_mem_reclaim(struct sock *sk)
1850{
Glauber Costa180d8cd2011-12-11 21:47:02 +00001851 sk_memory_allocated_sub(sk,
Glauber Costa0e90b312012-01-20 04:57:16 +00001852 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001853 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1854
Glauber Costa180d8cd2011-12-11 21:47:02 +00001855 if (sk_under_memory_pressure(sk) &&
1856 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1857 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001858}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001859EXPORT_SYMBOL(__sk_mem_reclaim);
1860
1861
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862/*
1863 * Set of default routines for initialising struct proto_ops when
1864 * the protocol does not support a particular function. In certain
1865 * cases where it makes no sense for a protocol to have a "do nothing"
1866 * function, some default processing is provided.
1867 */
1868
1869int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1870{
1871 return -EOPNOTSUPP;
1872}
Eric Dumazet2a915252009-05-27 11:30:05 +00001873EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001875int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 int len, int flags)
1877{
1878 return -EOPNOTSUPP;
1879}
Eric Dumazet2a915252009-05-27 11:30:05 +00001880EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1883{
1884 return -EOPNOTSUPP;
1885}
Eric Dumazet2a915252009-05-27 11:30:05 +00001886EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
1888int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1889{
1890 return -EOPNOTSUPP;
1891}
Eric Dumazet2a915252009-05-27 11:30:05 +00001892EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001894int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 int *len, int peer)
1896{
1897 return -EOPNOTSUPP;
1898}
Eric Dumazet2a915252009-05-27 11:30:05 +00001899EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900
Eric Dumazet2a915252009-05-27 11:30:05 +00001901unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902{
1903 return 0;
1904}
Eric Dumazet2a915252009-05-27 11:30:05 +00001905EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906
1907int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1908{
1909 return -EOPNOTSUPP;
1910}
Eric Dumazet2a915252009-05-27 11:30:05 +00001911EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913int sock_no_listen(struct socket *sock, int backlog)
1914{
1915 return -EOPNOTSUPP;
1916}
Eric Dumazet2a915252009-05-27 11:30:05 +00001917EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918
1919int sock_no_shutdown(struct socket *sock, int how)
1920{
1921 return -EOPNOTSUPP;
1922}
Eric Dumazet2a915252009-05-27 11:30:05 +00001923EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
1925int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001926 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927{
1928 return -EOPNOTSUPP;
1929}
Eric Dumazet2a915252009-05-27 11:30:05 +00001930EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
1932int sock_no_getsockopt(struct socket *sock, int level, int optname,
1933 char __user *optval, int __user *optlen)
1934{
1935 return -EOPNOTSUPP;
1936}
Eric Dumazet2a915252009-05-27 11:30:05 +00001937EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
1939int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1940 size_t len)
1941{
1942 return -EOPNOTSUPP;
1943}
Eric Dumazet2a915252009-05-27 11:30:05 +00001944EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
1946int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1947 size_t len, int flags)
1948{
1949 return -EOPNOTSUPP;
1950}
Eric Dumazet2a915252009-05-27 11:30:05 +00001951EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952
1953int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1954{
1955 /* Mirror missing mmap method error code */
1956 return -ENODEV;
1957}
Eric Dumazet2a915252009-05-27 11:30:05 +00001958EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1961{
1962 ssize_t res;
1963 struct msghdr msg = {.msg_flags = flags};
1964 struct kvec iov;
1965 char *kaddr = kmap(page);
1966 iov.iov_base = kaddr + offset;
1967 iov.iov_len = size;
1968 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1969 kunmap(page);
1970 return res;
1971}
Eric Dumazet2a915252009-05-27 11:30:05 +00001972EXPORT_SYMBOL(sock_no_sendpage);
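/*
 * Usage sketch (illustrative only): a protocol that does not implement
 * some operations wires these stubs into its struct proto_ops; the
 * family and name below are hypothetical.
 *
 *	static const struct proto_ops example_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.accept		= sock_no_accept,
 *		.ioctl		= sock_no_ioctl,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};
 */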
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
1974/*
1975 * Default Socket Callbacks
1976 */
1977
1978static void sock_def_wakeup(struct sock *sk)
1979{
Eric Dumazet43815482010-04-29 11:01:49 +00001980 struct socket_wq *wq;
1981
1982 rcu_read_lock();
1983 wq = rcu_dereference(sk->sk_wq);
1984 if (wq_has_sleeper(wq))
1985 wake_up_interruptible_all(&wq->wait);
1986 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987}
1988
1989static void sock_def_error_report(struct sock *sk)
1990{
Eric Dumazet43815482010-04-29 11:01:49 +00001991 struct socket_wq *wq;
1992
1993 rcu_read_lock();
1994 wq = rcu_dereference(sk->sk_wq);
1995 if (wq_has_sleeper(wq))
1996 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001997 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00001998 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999}
2000
2001static void sock_def_readable(struct sock *sk, int len)
2002{
Eric Dumazet43815482010-04-29 11:01:49 +00002003 struct socket_wq *wq;
2004
2005 rcu_read_lock();
2006 wq = rcu_dereference(sk->sk_wq);
2007 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08002008 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07002009 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002010 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00002011 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012}
2013
2014static void sock_def_write_space(struct sock *sk)
2015{
Eric Dumazet43815482010-04-29 11:01:49 +00002016 struct socket_wq *wq;
2017
2018 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
2020 /* Do not wake up a writer until he can make "significant"
2021 * progress. --DaveM
2022 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002023 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002024 wq = rcu_dereference(sk->sk_wq);
2025 if (wq_has_sleeper(wq))
2026 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002027 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
2029 /* Should agree with poll, otherwise some programs break */
2030 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002031 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 }
2033
Eric Dumazet43815482010-04-29 11:01:49 +00002034 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035}
2036
2037static void sock_def_destruct(struct sock *sk)
2038{
Jesper Juhla51482b2005-11-08 09:41:34 -08002039 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040}
2041
2042void sk_send_sigurg(struct sock *sk)
2043{
2044 if (sk->sk_socket && sk->sk_socket->file)
2045 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002046 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047}
Eric Dumazet2a915252009-05-27 11:30:05 +00002048EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
2050void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2051 unsigned long expires)
2052{
2053 if (!mod_timer(timer, expires))
2054 sock_hold(sk);
2055}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056EXPORT_SYMBOL(sk_reset_timer);
2057
2058void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2059{
2060 if (timer_pending(timer) && del_timer(timer))
2061 __sock_put(sk);
2062}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063EXPORT_SYMBOL(sk_stop_timer);
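/*
 * Usage sketch (illustrative only): sk_reset_timer() holds a socket
 * reference while the timer is pending, so the timer handler (or
 * sk_stop_timer()) is responsible for the matching sock_put(); compare
 * inet_csk_reset_xmit_timer() and inet_csk_clear_xmit_timers().
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);
 */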
2064
2065void sock_init_data(struct socket *sock, struct sock *sk)
2066{
2067 skb_queue_head_init(&sk->sk_receive_queue);
2068 skb_queue_head_init(&sk->sk_write_queue);
2069 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07002070#ifdef CONFIG_NET_DMA
2071 skb_queue_head_init(&sk->sk_async_wait_queue);
2072#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
2074 sk->sk_send_head = NULL;
2075
2076 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002077
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 sk->sk_allocation = GFP_KERNEL;
2079 sk->sk_rcvbuf = sysctl_rmem_default;
2080 sk->sk_sndbuf = sysctl_wmem_default;
2081 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002082 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
2084 sock_set_flag(sk, SOCK_ZAPPED);
2085
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002086 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002088 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 sock->sk = sk;
2090 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002091 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
Eric Dumazetb6c67122010-04-08 23:03:29 +00002093 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002095 lockdep_set_class_and_name(&sk->sk_callback_lock,
2096 af_callback_keys + sk->sk_family,
2097 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
2099 sk->sk_state_change = sock_def_wakeup;
2100 sk->sk_data_ready = sock_def_readable;
2101 sk->sk_write_space = sock_def_write_space;
2102 sk->sk_error_report = sock_def_error_report;
2103 sk->sk_destruct = sock_def_destruct;
2104
2105 sk->sk_sndmsg_page = NULL;
2106 sk->sk_sndmsg_off = 0;
Pavel Emelyanovef64a542012-02-21 07:31:34 +00002107 sk->sk_peek_off = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002109 sk->sk_peer_pid = NULL;
2110 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 sk->sk_write_pending = 0;
2112 sk->sk_rcvlowat = 1;
2113 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2114 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2115
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002116 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002118 /*
2119 * Before updating sk_refcnt, we must commit prior changes to memory
2120 * (Documentation/RCU/rculist_nulls.txt for details)
2121 */
2122 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002124 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125}
Eric Dumazet2a915252009-05-27 11:30:05 +00002126EXPORT_SYMBOL(sock_init_data);
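/*
 * Usage sketch (illustrative only): an address family's create() hook
 * typically allocates the sock and lets sock_init_data() fill in the
 * generic defaults before overriding protocol specifics; the names
 * with an example_ prefix are hypothetical.
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	sk->sk_destruct = example_destruct;
 */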
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002128void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129{
2130 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002131 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002132 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002134 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002135 spin_unlock(&sk->sk_lock.slock);
2136 /*
2137 * The sk_lock has mutex_lock() semantics here:
2138 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002139 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002140 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002142EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002144void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002146 /*
2147 * The sk_lock has mutex_unlock() semantics:
2148 */
2149 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2150
2151 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 if (sk->sk_backlog.tail)
2153 __release_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002154 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002155 if (waitqueue_active(&sk->sk_lock.wq))
2156 wake_up(&sk->sk_lock.wq);
2157 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158}
2159EXPORT_SYMBOL(release_sock);
2160
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002161/**
2162 * lock_sock_fast - fast version of lock_sock
2163 * @sk: socket
2164 *
 * This version should be used for very small sections, where the process
 * won't block.
 * Returns false if the fast path is taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path is taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
2170 */
2171bool lock_sock_fast(struct sock *sk)
2172{
2173 might_sleep();
2174 spin_lock_bh(&sk->sk_lock.slock);
2175
2176 if (!sk->sk_lock.owned)
2177 /*
2178 * Note : We must disable BH
2179 */
2180 return false;
2181
2182 __lock_sock(sk);
2183 sk->sk_lock.owned = 1;
2184 spin_unlock(&sk->sk_lock.slock);
2185 /*
2186 * The sk_lock has mutex_lock() semantics here:
2187 */
2188 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2189 local_bh_enable();
2190 return true;
2191}
2192EXPORT_SYMBOL(lock_sock_fast);
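/*
 * Usage sketch (illustrative only): the return value must be handed
 * back to unlock_sock_fast() so the matching unlock path is taken
 * (udp_recvmsg() is one real caller of this pattern).
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	(short critical section touching socket state)
 *	unlock_sock_fast(sk, slow);
 */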
2193
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002195{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002196 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002198 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002199 tv = ktime_to_timeval(sk->sk_stamp);
2200 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002202 if (tv.tv_sec == 0) {
2203 sk->sk_stamp = ktime_get_real();
2204 tv = ktime_to_timeval(sk->sk_stamp);
2205 }
2206 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002207}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208EXPORT_SYMBOL(sock_get_timestamp);
2209
Eric Dumazetae40eb12007-03-18 17:33:16 -07002210int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2211{
2212 struct timespec ts;
2213 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002214 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002215 ts = ktime_to_timespec(sk->sk_stamp);
2216 if (ts.tv_sec == -1)
2217 return -ENOENT;
2218 if (ts.tv_sec == 0) {
2219 sk->sk_stamp = ktime_get_real();
2220 ts = ktime_to_timespec(sk->sk_stamp);
2221 }
2222 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2223}
2224EXPORT_SYMBOL(sock_get_timestampns);
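/*
 * Usage sketch (illustrative only): these two helpers back the
 * SIOCGSTAMP/SIOCGSTAMPNS ioctls, e.g. from a protocol's ioctl handler:
 *
 *	case SIOCGSTAMP:
 *		return sock_get_timestamp(sk, (struct timeval __user *)arg);
 *	case SIOCGSTAMPNS:
 *		return sock_get_timestampns(sk, (struct timespec __user *)arg);
 */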
2225
Patrick Ohly20d49472009-02-12 05:03:38 +00002226void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002227{
Patrick Ohly20d49472009-02-12 05:03:38 +00002228 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002229 unsigned long previous_flags = sk->sk_flags;
2230
Patrick Ohly20d49472009-02-12 05:03:38 +00002231 sock_set_flag(sk, flag);
2232 /*
2233 * we just set one of the two flags which require net
2234 * time stamping, but time stamping might have been on
2235 * already because of the other one
2236 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002237 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002238 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 }
2240}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241
2242/*
 * Get a socket option on a socket.
2244 *
2245 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2246 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2248 */
2249int sock_common_getsockopt(struct socket *sock, int level, int optname,
2250 char __user *optval, int __user *optlen)
2251{
2252 struct sock *sk = sock->sk;
2253
2254 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2255}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256EXPORT_SYMBOL(sock_common_getsockopt);
2257
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002258#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002259int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2260 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002261{
2262 struct sock *sk = sock->sk;
2263
Johannes Berg1e51f952007-03-06 13:44:06 -08002264 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002265 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2266 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002267 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2268}
2269EXPORT_SYMBOL(compat_sock_common_getsockopt);
2270#endif
2271
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2273 struct msghdr *msg, size_t size, int flags)
2274{
2275 struct sock *sk = sock->sk;
2276 int addr_len = 0;
2277 int err;
2278
2279 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2280 flags & ~MSG_DONTWAIT, &addr_len);
2281 if (err >= 0)
2282 msg->msg_namelen = addr_len;
2283 return err;
2284}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285EXPORT_SYMBOL(sock_common_recvmsg);
2286
2287/*
2288 * Set socket options on an inet socket.
2289 */
2290int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002291 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292{
2293 struct sock *sk = sock->sk;
2294
2295 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2296}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297EXPORT_SYMBOL(sock_common_setsockopt);
2298
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002299#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002300int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002301 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002302{
2303 struct sock *sk = sock->sk;
2304
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002305 if (sk->sk_prot->compat_setsockopt != NULL)
2306 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2307 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002308 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2309}
2310EXPORT_SYMBOL(compat_sock_common_setsockopt);
2311#endif
2312
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313void sk_common_release(struct sock *sk)
2314{
2315 if (sk->sk_prot->destroy)
2316 sk->sk_prot->destroy(sk);
2317
2318 /*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network stack still does.
2321 * Step one, detach it from networking:
2322 *
2323 * A. Remove from hash tables.
2324 */
2325
2326 sk->sk_prot->unhash(sk);
2327
2328 /*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because some CPU
	 * was running the receiver and did the hash table lookup before we
	 * unhashed the socket. They will reach the receive queue and be
	 * purged by the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
2338 */
2339
2340 sock_orphan(sk);
2341
2342 xfrm_sk_free_policy(sk);
2343
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002344 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 sock_put(sk);
2346}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347EXPORT_SYMBOL(sk_common_release);
2348
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002349#ifdef CONFIG_PROC_FS
2350#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002351struct prot_inuse {
2352 int val[PROTO_INUSE_NR];
2353};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002354
2355static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002356
2357#ifdef CONFIG_NET_NS
2358void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2359{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002360 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002361}
2362EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2363
2364int sock_prot_inuse_get(struct net *net, struct proto *prot)
2365{
2366 int cpu, idx = prot->inuse_idx;
2367 int res = 0;
2368
2369 for_each_possible_cpu(cpu)
2370 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2371
2372 return res >= 0 ? res : 0;
2373}
2374EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
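/*
 * Usage sketch (illustrative only): protocols bump this per-cpu counter
 * from their hash/unhash paths, and /proc/net/protocols reads it back
 * through sock_prot_inuse_get().
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	(on hash)
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	(on unhash)
 */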
2375
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002376static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002377{
2378 net->core.inuse = alloc_percpu(struct prot_inuse);
2379 return net->core.inuse ? 0 : -ENOMEM;
2380}
2381
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002382static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002383{
2384 free_percpu(net->core.inuse);
2385}
2386
2387static struct pernet_operations net_inuse_ops = {
2388 .init = sock_inuse_init_net,
2389 .exit = sock_inuse_exit_net,
2390};
2391
2392static __init int net_inuse_init(void)
2393{
2394 if (register_pernet_subsys(&net_inuse_ops))
2395 panic("Cannot initialize net inuse counters");
2396
2397 return 0;
2398}
2399
2400core_initcall(net_inuse_init);
2401#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002402static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2403
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002404void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002405{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002406 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002407}
2408EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2409
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002410int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002411{
2412 int cpu, idx = prot->inuse_idx;
2413 int res = 0;
2414
2415 for_each_possible_cpu(cpu)
2416 res += per_cpu(prot_inuse, cpu).val[idx];
2417
2418 return res >= 0 ? res : 0;
2419}
2420EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002421#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002422
2423static void assign_proto_idx(struct proto *prot)
2424{
2425 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2426
2427 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2428 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2429 return;
2430 }
2431
2432 set_bit(prot->inuse_idx, proto_inuse_idx);
2433}
2434
2435static void release_proto_idx(struct proto *prot)
2436{
2437 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2438 clear_bit(prot->inuse_idx, proto_inuse_idx);
2439}
2440#else
2441static inline void assign_proto_idx(struct proto *prot)
2442{
2443}
2444
2445static inline void release_proto_idx(struct proto *prot)
2446{
2447}
2448#endif
2449
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450int proto_register(struct proto *prot, int alloc_slab)
2451{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 if (alloc_slab) {
2453 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002454 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2455 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456
2457 if (prot->slab == NULL) {
2458 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2459 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002460 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002462
2463 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002464 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002465 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002466 goto out_free_sock_slab;
2467
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002468 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002469 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002470 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002471
2472 if (prot->rsk_prot->slab == NULL) {
2473 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2474 prot->name);
2475 goto out_free_request_sock_slab_name;
2476 }
2477 }
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002478
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002479 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002480 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002481
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002482 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002483 goto out_free_request_sock_slab;
2484
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002485 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002486 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002487 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002488 0,
2489 SLAB_HWCACHE_ALIGN |
2490 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002491 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002492 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002493 goto out_free_timewait_sock_slab_name;
2494 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 }
2496
Glauber Costa36b77a52011-12-16 00:51:59 +00002497 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002499 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002500 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002501 return 0;
2502
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002503out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002504 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002505out_free_request_sock_slab:
2506 if (prot->rsk_prot && prot->rsk_prot->slab) {
2507 kmem_cache_destroy(prot->rsk_prot->slab);
2508 prot->rsk_prot->slab = NULL;
2509 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002510out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002511 if (prot->rsk_prot)
2512 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002513out_free_sock_slab:
2514 kmem_cache_destroy(prot->slab);
2515 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002516out:
2517 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519EXPORT_SYMBOL(proto_register);
2520
2521void proto_unregister(struct proto *prot)
2522{
Glauber Costa36b77a52011-12-16 00:51:59 +00002523 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002524 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002525 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002526 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527
2528 if (prot->slab != NULL) {
2529 kmem_cache_destroy(prot->slab);
2530 prot->slab = NULL;
2531 }
2532
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002533 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002534 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002535 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002536 prot->rsk_prot->slab = NULL;
2537 }
2538
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002539 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002540 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002541 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002542 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002543 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545EXPORT_SYMBOL(proto_unregister);
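/*
 * Usage sketch (illustrative only): a protocol declares a struct proto
 * and registers/unregisters it from its module init/exit; every name
 * with an example_ prefix is hypothetical.
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_proto, 1);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		proto_unregister(&example_proto);
 *	}
 */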
2546
2547#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002549 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550{
Glauber Costa36b77a52011-12-16 00:51:59 +00002551 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002552 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553}
2554
2555static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2556{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002557 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558}
2559
2560static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002561 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562{
Glauber Costa36b77a52011-12-16 00:51:59 +00002563 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564}
2565
2566static char proto_method_implemented(const void *method)
2567{
2568 return method == NULL ? 'n' : 'y';
2569}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002570static long sock_prot_memory_allocated(struct proto *proto)
2571{
2572 return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
2573}
2574
2575static char *sock_prot_memory_pressure(struct proto *proto)
2576{
2577 return proto->memory_pressure != NULL ?
2578 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2579}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580
2581static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2582{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002583
Eric Dumazet8d987e52010-11-09 23:24:26 +00002584 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2586 proto->name,
2587 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002588 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00002589 sock_prot_memory_allocated(proto),
2590 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 proto->max_header,
2592 proto->slab == NULL ? "no" : "yes",
2593 module_name(proto->owner),
2594 proto_method_implemented(proto->close),
2595 proto_method_implemented(proto->connect),
2596 proto_method_implemented(proto->disconnect),
2597 proto_method_implemented(proto->accept),
2598 proto_method_implemented(proto->ioctl),
2599 proto_method_implemented(proto->init),
2600 proto_method_implemented(proto->destroy),
2601 proto_method_implemented(proto->shutdown),
2602 proto_method_implemented(proto->setsockopt),
2603 proto_method_implemented(proto->getsockopt),
2604 proto_method_implemented(proto->sendmsg),
2605 proto_method_implemented(proto->recvmsg),
2606 proto_method_implemented(proto->sendpage),
2607 proto_method_implemented(proto->bind),
2608 proto_method_implemented(proto->backlog_rcv),
2609 proto_method_implemented(proto->hash),
2610 proto_method_implemented(proto->unhash),
2611 proto_method_implemented(proto->get_port),
2612 proto_method_implemented(proto->enter_memory_pressure));
2613}
2614
2615static int proto_seq_show(struct seq_file *seq, void *v)
2616{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002617 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2619 "protocol",
2620 "size",
2621 "sockets",
2622 "memory",
2623 "press",
2624 "maxhdr",
2625 "slab",
2626 "module",
2627 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2628 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002629 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 return 0;
2631}
2632
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002633static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 .start = proto_seq_start,
2635 .next = proto_seq_next,
2636 .stop = proto_seq_stop,
2637 .show = proto_seq_show,
2638};
2639
2640static int proto_seq_open(struct inode *inode, struct file *file)
2641{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002642 return seq_open_net(inode, file, &proto_seq_ops,
2643 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644}
2645
Arjan van de Ven9a321442007-02-12 00:55:35 -08002646static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 .owner = THIS_MODULE,
2648 .open = proto_seq_open,
2649 .read = seq_read,
2650 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002651 .release = seq_release_net,
2652};
2653
2654static __net_init int proto_init_net(struct net *net)
2655{
2656 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2657 return -ENOMEM;
2658
2659 return 0;
2660}
2661
2662static __net_exit void proto_exit_net(struct net *net)
2663{
2664 proc_net_remove(net, "protocols");
2665}
2666
2667
2668static __net_initdata struct pernet_operations proto_net_ops = {
2669 .init = proto_init_net,
2670 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671};
2672
2673static int __init proto_init(void)
2674{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002675 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676}
2677
2678subsys_initcall(proto_init);
2679
2680#endif /* PROC_FS */