/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/jump_label.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(cgrp, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(cgrp, ss);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(cgrp, ss);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

struct jump_label_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
  "slock-AF_NFC"   , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
  "clock-AF_NFC"   , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

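/*
 * For reference (illustrative; SKB_TRUESIZE is defined in <linux/skbuff.h>):
 * it charges the payload plus the aligned metadata each packet drags along,
 *
 *	#define SKB_TRUESIZE(X) ((X) +					\
 *			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
 *			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 *
 * so the defaults above budget for 256 packets of 256 bytes each, with the
 * per-skb overhead included, whatever that overhead is on a given platform.
 */
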
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif
#if !defined(CONFIG_NETPRIO_CGROUP)
int net_prio_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_prio_subsys_id);
#endif
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

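/*
 * Illustrative userspace counterpart (not part of this file): the timeout
 * parsed above arrives via setsockopt() as a struct timeval, e.g.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A zero timeval means "wait forever" (MAX_SCHEDULE_TIMEOUT above), and a
 * tv_usec outside [0, USEC_PER_SEC) is rejected with -EDOM.
 */
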
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* We escape from the RCU-protected region here, so make sure
	 * we don't leak a non-refcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

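/*
 * Minimal sketch of how a protocol's receive path typically uses the helper
 * above (illustrative only; x_proto_rcv and x_sk_lookup are hypothetical).
 * On failure, ownership of the skb stays with the caller, which must free it:
 *
 *	static int x_proto_rcv(struct sk_buff *skb)
 *	{
 *		struct sock *sk = x_sk_lookup(skb);
 *
 *		if (!sk || sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);	(queue full, filtered, or no memory)
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */
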
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

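/*
 * Illustrative userspace call that reaches the function above (the caller
 * needs CAP_NET_RAW); an empty name or zero length unbinds the socket:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 */
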
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints. */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints. */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc. to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);

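/*
 * Illustrative userspace view of the SO_RCVBUF doubling above (not part of
 * this file): the value read back is twice the value that was set, because
 * the kernel reports what it actually reserved, overhead included.
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *	(val is now 131072, assuming 65536 <= sysctl_rmem_max)
 */
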
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

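/*
 * Illustrative userspace counterpart for the SO_PEERCRED branch above
 * (not part of this file), typically used on connected AF_UNIX sockets:
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n", peer.pid, peer.uid, peer.gid);
 */
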
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 * nodes un-modified. Special care is taken when initializing the object
 * to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);

void sock_update_netprioidx(struct sock *sk)
{
	struct cgroup_netprio_state *state;
	if (in_interrupt())
		return;
	rcu_read_lock();
	state = task_netprio_state(current);
	sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

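/*
 * Minimal sketch of how a protocol family typically allocates its socket in
 * its ->create() handler (illustrative only; my_proto is hypothetical):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * sk_alloc() takes a module reference and a net namespace reference; a
 * failing create path must give them back via sk_free().
 */
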
static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop the reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

Eric Dumazete56c57d2011-11-08 17:07:07 -05001275/**
1276 * sk_clone_lock - clone a socket, and lock its clone
1277 * @sk: the socket to clone
1278 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1279 *
1280 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1281 */
1282struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001283{
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001284 struct sock *newsk;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001285
Pavel Emelyanov8fd1d172007-11-01 00:37:32 -07001286 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001287 if (newsk != NULL) {
1288 struct sk_filter *filter;
1289
Venkat Yekkirala892c1412006-08-04 23:08:56 -07001290 sock_copy(newsk, sk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001291
1292 /* SANITY */
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001293 get_net(sock_net(newsk));
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001294 sk_node_init(&newsk->sk_node);
1295 sock_lock_init(newsk);
1296 bh_lock_sock(newsk);
Eric Dumazetfa438cc2007-03-04 16:05:44 -08001297 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
Zhu Yi8eae9392010-03-04 18:01:40 +00001298 newsk->sk_backlog.len = 0;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001299
1300 atomic_set(&newsk->sk_rmem_alloc, 0);
Eric Dumazet2b85a342009-06-11 02:55:43 -07001301 /*
1302 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1303 */
1304 atomic_set(&newsk->sk_wmem_alloc, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001305 atomic_set(&newsk->sk_omem_alloc, 0);
1306 skb_queue_head_init(&newsk->sk_receive_queue);
1307 skb_queue_head_init(&newsk->sk_write_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001308#ifdef CONFIG_NET_DMA
1309 skb_queue_head_init(&newsk->sk_async_wait_queue);
1310#endif
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001311
Eric Dumazetb6c67122010-04-08 23:03:29 +00001312 spin_lock_init(&newsk->sk_dst_lock);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001313 rwlock_init(&newsk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001314 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1315 af_callback_keys + newsk->sk_family,
1316 af_family_clock_key_strings[newsk->sk_family]);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001317
1318 newsk->sk_dst_cache = NULL;
1319 newsk->sk_wmem_queued = 0;
1320 newsk->sk_forward_alloc = 0;
1321 newsk->sk_send_head = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001322 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1323
1324 sock_reset_flag(newsk, SOCK_DONE);
1325 skb_queue_head_init(&newsk->sk_error_queue);
1326
Eric Dumazet0d7da9d2010-10-25 03:47:05 +00001327 filter = rcu_dereference_protected(newsk->sk_filter, 1);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001328 if (filter != NULL)
1329 sk_filter_charge(newsk, filter);
1330
1331 if (unlikely(xfrm_sk_clone_policy(newsk))) {
1332			/* It is still a raw copy of the parent, so invalidate
1333			 * the destructor and do a plain sk_free() */
1334 newsk->sk_destruct = NULL;
Thomas Gleixnerb0691c82011-10-25 02:30:50 +00001335 bh_unlock_sock(newsk);
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001336 sk_free(newsk);
1337 newsk = NULL;
1338 goto out;
1339 }
1340
1341 newsk->sk_err = 0;
1342 newsk->sk_priority = 0;
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001343 /*
1344 * Before updating sk_refcnt, we must commit prior changes to memory
1345 * (Documentation/RCU/rculist_nulls.txt for details)
1346 */
1347 smp_wmb();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001348 atomic_set(&newsk->sk_refcnt, 2);
1349
1350 /*
1351 * Increment the counter in the same struct proto as the master
1352 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1353 * is the same as sk->sk_prot->socks, as this field was copied
1354 * with memcpy).
1355 *
1356 * This _changes_ the previous behaviour, where
1357		 * tcp_create_openreq_child always incremented the
1358		 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1359		 * to be taken into account in all callers. -acme
1360 */
1361 sk_refcnt_debug_inc(newsk);
David S. Miller972692e2008-06-17 22:41:38 -07001362 sk_set_socket(newsk, NULL);
Eric Dumazet43815482010-04-29 11:01:49 +00001363 newsk->sk_wq = NULL;
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001364
Glauber Costaf3f511e2012-01-05 20:16:39 +00001365 sk_update_clone(sk, newsk);
1366
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001367 if (newsk->sk_prot->sockets_allocated)
Glauber Costa180d8cd2011-12-11 21:47:02 +00001368 sk_sockets_allocated_inc(newsk);
Octavian Purdila704da5602010-01-08 00:00:09 -08001369
Eric Dumazet08e29af2011-11-28 12:04:18 +00001370 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
Octavian Purdila704da5602010-01-08 00:00:09 -08001371 net_enable_timestamp();
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001372 }
1373out:
1374 return newsk;
1375}
Eric Dumazete56c57d2011-11-08 17:07:07 -05001376EXPORT_SYMBOL_GPL(sk_clone_lock);
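/*
 * Illustrative caller sketch (hypothetical, not from this file). The
 * clone is returned locked, so the caller must bh_unlock_sock() it on
 * every path, including its own error paths:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk != NULL) {
 *		... protocol-private initialisation, may fail ...
 *		bh_unlock_sock(newsk);
 *	}
 */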
Arnaldo Carvalho de Melo87d11ce2005-08-09 20:10:12 -07001377
Andi Kleen99580892007-04-20 17:12:43 -07001378void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1379{
1380 __sk_dst_set(sk, dst);
1381 sk->sk_route_caps = dst->dev->features;
1382 if (sk->sk_route_caps & NETIF_F_GSO)
Herbert Xu4fcd6b92007-05-31 22:15:50 -07001383 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
Eric Dumazeta4654192010-05-16 00:36:33 -07001384 sk->sk_route_caps &= ~sk->sk_route_nocaps;
Andi Kleen99580892007-04-20 17:12:43 -07001385 if (sk_can_gso(sk)) {
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001386 if (dst->header_len) {
Andi Kleen99580892007-04-20 17:12:43 -07001387 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001388 } else {
Andi Kleen99580892007-04-20 17:12:43 -07001389 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07001390 sk->sk_gso_max_size = dst->dev->gso_max_size;
1391 }
Andi Kleen99580892007-04-20 17:12:43 -07001392 }
1393}
1394EXPORT_SYMBOL_GPL(sk_setup_caps);
1395
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396void __init sk_init(void)
1397{
Jan Beulich44813742009-09-21 17:03:05 -07001398 if (totalram_pages <= 4096) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 sysctl_wmem_max = 32767;
1400 sysctl_rmem_max = 32767;
1401 sysctl_wmem_default = 32767;
1402 sysctl_rmem_default = 32767;
Jan Beulich44813742009-09-21 17:03:05 -07001403 } else if (totalram_pages >= 131072) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 sysctl_wmem_max = 131071;
1405 sysctl_rmem_max = 131071;
1406 }
1407}
1408
1409/*
1410 * Simple resource managers for sockets.
1411 */
1412
1413
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001414/*
1415 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 */
1417void sock_wfree(struct sk_buff *skb)
1418{
1419 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001420 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Eric Dumazetd99927f2009-09-24 10:49:24 +00001422 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1423 /*
1424		 * Keep a reference on sk_wmem_alloc; it will be released
1425		 * after the sk_write_space() call
1426 */
1427 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001429 len = 1;
1430 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001431 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001432 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1433 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001434 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001435 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001436 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437}
Eric Dumazet2a915252009-05-27 11:30:05 +00001438EXPORT_SYMBOL(sock_wfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001440/*
1441 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 */
1443void sock_rfree(struct sk_buff *skb)
1444{
1445 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001446 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
Eric Dumazetd361fd52010-07-10 22:45:17 +00001448 atomic_sub(len, &sk->sk_rmem_alloc);
1449 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450}
Eric Dumazet2a915252009-05-27 11:30:05 +00001451EXPORT_SYMBOL(sock_rfree);
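/*
 * Sketch of how these destructors are wired up: skb_set_owner_w() and
 * skb_set_owner_r() charge skb->truesize to the socket and install
 * sock_wfree()/sock_rfree() as skb->destructor, so a later kfree_skb()
 * undoes the accounting automatically:
 *
 *	skb_set_owner_w(skb, sk);	tx: charges sk_wmem_alloc
 *	skb_set_owner_r(skb, sk);	rx: charges sk_rmem_alloc
 *	...
 *	kfree_skb(skb);			runs sock_wfree()/sock_rfree()
 */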
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
1453
1454int sock_i_uid(struct sock *sk)
1455{
1456 int uid;
1457
Eric Dumazetf064af12010-09-22 12:43:39 +00001458 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001460 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 return uid;
1462}
Eric Dumazet2a915252009-05-27 11:30:05 +00001463EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464
1465unsigned long sock_i_ino(struct sock *sk)
1466{
1467 unsigned long ino;
1468
Eric Dumazetf064af12010-09-22 12:43:39 +00001469 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001471 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 return ino;
1473}
Eric Dumazet2a915252009-05-27 11:30:05 +00001474EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
1476/*
1477 * Allocate a skb from the socket's send buffer.
1478 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001479struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001480 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481{
1482 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001483 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 if (skb) {
1485 skb_set_owner_w(skb, sk);
1486 return skb;
1487 }
1488 }
1489 return NULL;
1490}
Eric Dumazet2a915252009-05-27 11:30:05 +00001491EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492
1493/*
1494 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001495 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001496struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001497 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498{
1499 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1500 struct sk_buff *skb = alloc_skb(size, priority);
1501 if (skb) {
1502 skb_set_owner_r(skb, sk);
1503 return skb;
1504 }
1505 }
1506 return NULL;
1507}
1508
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001509/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001511 */
Al Virodd0fc662005-10-07 07:46:04 +01001512void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513{
1514 if ((unsigned)size <= sysctl_optmem_max &&
1515 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1516 void *mem;
1517 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001518 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 */
1520 atomic_add(size, &sk->sk_omem_alloc);
1521 mem = kmalloc(size, priority);
1522 if (mem)
1523 return mem;
1524 atomic_sub(size, &sk->sk_omem_alloc);
1525 }
1526 return NULL;
1527}
Eric Dumazet2a915252009-05-27 11:30:05 +00001528EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529
1530/*
1531 * Free an option memory block.
1532 */
1533void sock_kfree_s(struct sock *sk, void *mem, int size)
1534{
1535 kfree(mem);
1536 atomic_sub(size, &sk->sk_omem_alloc);
1537}
Eric Dumazet2a915252009-05-27 11:30:05 +00001538EXPORT_SYMBOL(sock_kfree_s);
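/*
 * Hypothetical usage sketch (struct my_opt is illustrative only): the
 * block must be freed with the same size it was charged with, since
 * sk_omem_alloc is adjusted symmetrically:
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (opt == NULL)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */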
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539
1540/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1541   I think these locks should be removed for datagram sockets.
1542 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001543static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544{
1545 DEFINE_WAIT(wait);
1546
1547 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1548 for (;;) {
1549 if (!timeo)
1550 break;
1551 if (signal_pending(current))
1552 break;
1553 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001554 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1556 break;
1557 if (sk->sk_shutdown & SEND_SHUTDOWN)
1558 break;
1559 if (sk->sk_err)
1560 break;
1561 timeo = schedule_timeout(timeo);
1562 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001563 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 return timeo;
1565}
1566
1567
1568/*
1569 * Generic send/receive buffer handlers
1570 */
1571
Herbert Xu4cc7f682009-02-04 16:55:54 -08001572struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1573 unsigned long data_len, int noblock,
1574 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575{
1576 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001577 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 long timeo;
1579 int err;
1580
1581 gfp_mask = sk->sk_allocation;
1582 if (gfp_mask & __GFP_WAIT)
1583 gfp_mask |= __GFP_REPEAT;
1584
1585 timeo = sock_sndtimeo(sk, noblock);
1586 while (1) {
1587 err = sock_error(sk);
1588 if (err != 0)
1589 goto failure;
1590
1591 err = -EPIPE;
1592 if (sk->sk_shutdown & SEND_SHUTDOWN)
1593 goto failure;
1594
1595 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001596 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 if (skb) {
1598 int npages;
1599 int i;
1600
1601 /* No pages, we're done... */
1602 if (!data_len)
1603 break;
1604
1605 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1606 skb->truesize += data_len;
1607 skb_shinfo(skb)->nr_frags = npages;
1608 for (i = 0; i < npages; i++) {
1609 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
1611 page = alloc_pages(sk->sk_allocation, 0);
1612 if (!page) {
1613 err = -ENOBUFS;
1614 skb_shinfo(skb)->nr_frags = i;
1615 kfree_skb(skb);
1616 goto failure;
1617 }
1618
Ian Campbellea2ab692011-08-22 23:44:58 +00001619 __skb_fill_page_desc(skb, i,
1620 page, 0,
1621 (data_len >= PAGE_SIZE ?
1622 PAGE_SIZE :
1623 data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 data_len -= PAGE_SIZE;
1625 }
1626
1627 /* Full success... */
1628 break;
1629 }
1630 err = -ENOBUFS;
1631 goto failure;
1632 }
1633 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1634 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1635 err = -EAGAIN;
1636 if (!timeo)
1637 goto failure;
1638 if (signal_pending(current))
1639 goto interrupted;
1640 timeo = sock_wait_for_wmem(sk, timeo);
1641 }
1642
1643 skb_set_owner_w(skb, sk);
1644 return skb;
1645
1646interrupted:
1647 err = sock_intr_errno(timeo);
1648failure:
1649 *errcode = err;
1650 return NULL;
1651}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001652EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001654struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 int noblock, int *errcode)
1656{
1657 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1658}
Eric Dumazet2a915252009-05-27 11:30:05 +00001659EXPORT_SYMBOL(sock_alloc_send_skb);
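/*
 * Typical datagram-sender pattern, sketched (err, len, reserve and msg
 * are assumed caller-side variables):
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (skb == NULL)
 *		goto out;	err holds -EAGAIN, -EPIPE, sock_error()...
 */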
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
1661static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001662 __releases(&sk->sk_lock.slock)
1663 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664{
1665 DEFINE_WAIT(wait);
1666
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001667 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1669 TASK_UNINTERRUPTIBLE);
1670 spin_unlock_bh(&sk->sk_lock.slock);
1671 schedule();
1672 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001673 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 break;
1675 }
1676 finish_wait(&sk->sk_lock.wq, &wait);
1677}
1678
1679static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001680 __releases(&sk->sk_lock.slock)
1681 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682{
1683 struct sk_buff *skb = sk->sk_backlog.head;
1684
1685 do {
1686 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1687 bh_unlock_sock(sk);
1688
1689 do {
1690 struct sk_buff *next = skb->next;
1691
Eric Dumazet7fee2262010-05-11 23:19:48 +00001692 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001694 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696 /*
1697 * We are in process context here with softirqs
1698 * disabled, use cond_resched_softirq() to preempt.
1699 * This is safe to do because we've taken the backlog
1700 * queue private:
1701 */
1702 cond_resched_softirq();
1703
1704 skb = next;
1705 } while (skb != NULL);
1706
1707 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001708 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001709
1710 /*
1711	 * Doing the zeroing here guarantees we cannot loop forever
1712 * while a wild producer attempts to flood us.
1713 */
1714 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715}
1716
1717/**
1718 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001719 * @sk: sock to wait on
1720 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 *
1722 * Now socket state including sk->sk_err is changed only under lock,
1723 * hence we may omit checks after joining wait queue.
1724 * We check the receive queue before schedule() only as an optimization;
1725 * it is very likely that release_sock() added new data.
1726 */
1727int sk_wait_data(struct sock *sk, long *timeo)
1728{
1729 int rc;
1730 DEFINE_WAIT(wait);
1731
Eric Dumazetaa395142010-04-20 13:03:51 +00001732 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1734 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1735 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001736 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 return rc;
1738}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739EXPORT_SYMBOL(sk_wait_data);
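/*
 * Sketched caller pattern, run under lock_sock() (timeo and flags are
 * assumed caller-side variables):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */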
1740
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001741/**
1742 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1743 * @sk: socket
1744 * @size: memory size to allocate
1745 * @kind: allocation type
1746 *
1747 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1748 * rmem allocation. This function assumes that protocols which have
1749 * memory_pressure use sk_wmem_queued as write buffer accounting.
1750 */
1751int __sk_mem_schedule(struct sock *sk, int size, int kind)
1752{
1753 struct proto *prot = sk->sk_prot;
1754 int amt = sk_mem_pages(size);
Eric Dumazet8d987e52010-11-09 23:24:26 +00001755 long allocated;
Glauber Costae1aab162011-12-11 21:47:03 +00001756 int parent_status = UNDER_LIMIT;
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001757
1758 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001759
Glauber Costae1aab162011-12-11 21:47:03 +00001760 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001761
1762 /* Under limit. */
Glauber Costae1aab162011-12-11 21:47:03 +00001763 if (parent_status == UNDER_LIMIT &&
1764 allocated <= sk_prot_mem_limits(sk, 0)) {
Glauber Costa180d8cd2011-12-11 21:47:02 +00001765 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001766 return 1;
1767 }
1768
Glauber Costae1aab162011-12-11 21:47:03 +00001769 /* Under pressure. (we or our parents) */
1770 if ((parent_status > SOFT_LIMIT) ||
1771 allocated > sk_prot_mem_limits(sk, 1))
Glauber Costa180d8cd2011-12-11 21:47:02 +00001772 sk_enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001773
Glauber Costae1aab162011-12-11 21:47:03 +00001774 /* Over hard limit (we or our parents) */
1775 if ((parent_status == OVER_LIMIT) ||
1776 (allocated > sk_prot_mem_limits(sk, 2)))
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001777 goto suppress_allocation;
1778
1779 /* guarantee minimum buffer size under pressure */
1780 if (kind == SK_MEM_RECV) {
1781 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1782 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001783
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001784 } else { /* SK_MEM_SEND */
1785 if (sk->sk_type == SOCK_STREAM) {
1786 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1787 return 1;
1788 } else if (atomic_read(&sk->sk_wmem_alloc) <
1789 prot->sysctl_wmem[0])
1790 return 1;
1791 }
1792
Glauber Costa180d8cd2011-12-11 21:47:02 +00001793 if (sk_has_memory_pressure(sk)) {
Eric Dumazet17483762008-11-25 21:16:35 -08001794 int alloc;
1795
Glauber Costa180d8cd2011-12-11 21:47:02 +00001796 if (!sk_under_memory_pressure(sk))
Eric Dumazet17483762008-11-25 21:16:35 -08001797 return 1;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001798 alloc = sk_sockets_allocated_read_positive(sk);
1799 if (sk_prot_mem_limits(sk, 2) > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001800 sk_mem_pages(sk->sk_wmem_queued +
1801 atomic_read(&sk->sk_rmem_alloc) +
1802 sk->sk_forward_alloc))
1803 return 1;
1804 }
1805
1806suppress_allocation:
1807
1808 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1809 sk_stream_moderate_sndbuf(sk);
1810
1811 /* Fail only if socket is _under_ its sndbuf.
1812		 * In this case we cannot block, so we have to fail.
1813 */
1814 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1815 return 1;
1816 }
1817
Satoru Moriya3847ce32011-06-17 12:00:03 +00001818 trace_sock_exceed_buf_limit(sk, prot, allocated);
1819
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001820 /* Alas. Undo changes. */
1821 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
Glauber Costa180d8cd2011-12-11 21:47:02 +00001822
Glauber Costae1aab162011-12-11 21:47:03 +00001823 sk_memory_allocated_sub(sk, amt, parent_status);
Glauber Costa180d8cd2011-12-11 21:47:02 +00001824
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001825 return 0;
1826}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001827EXPORT_SYMBOL(__sk_mem_schedule);
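/*
 * Protocols normally reach this through the sk_wmem_schedule() and
 * sk_rmem_schedule() wrappers (include/net/sock.h). A sketch of the
 * receive-side pattern:
 *
 *	if (!sk_rmem_schedule(sk, skb->truesize))
 *		goto drop;		no accounting room, drop packet
 *	skb_set_owner_r(skb, sk);
 */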
1828
1829/**
1830 * __sk_mem_reclaim - reclaim memory_allocated
1831 * @sk: socket
1832 */
1833void __sk_mem_reclaim(struct sock *sk)
1834{
Glauber Costa180d8cd2011-12-11 21:47:02 +00001835 sk_memory_allocated_sub(sk,
Glauber Costae1aab162011-12-11 21:47:03 +00001836 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001837 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1838
Glauber Costa180d8cd2011-12-11 21:47:02 +00001839 if (sk_under_memory_pressure(sk) &&
1840 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
1841 sk_leave_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001842}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001843EXPORT_SYMBOL(__sk_mem_reclaim);
1844
1845
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846/*
1847 * Set of default routines for initialising struct proto_ops when
1848 * the protocol does not support a particular function. In certain
1849 * cases where it makes no sense for a protocol to have a "do nothing"
1850 * function, some default processing is provided.
1851 */
1852
1853int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1854{
1855 return -EOPNOTSUPP;
1856}
Eric Dumazet2a915252009-05-27 11:30:05 +00001857EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001859int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 int len, int flags)
1861{
1862 return -EOPNOTSUPP;
1863}
Eric Dumazet2a915252009-05-27 11:30:05 +00001864EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
1866int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1867{
1868 return -EOPNOTSUPP;
1869}
Eric Dumazet2a915252009-05-27 11:30:05 +00001870EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
1872int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1873{
1874 return -EOPNOTSUPP;
1875}
Eric Dumazet2a915252009-05-27 11:30:05 +00001876EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001878int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 int *len, int peer)
1880{
1881 return -EOPNOTSUPP;
1882}
Eric Dumazet2a915252009-05-27 11:30:05 +00001883EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
Eric Dumazet2a915252009-05-27 11:30:05 +00001885unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886{
1887 return 0;
1888}
Eric Dumazet2a915252009-05-27 11:30:05 +00001889EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890
1891int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1892{
1893 return -EOPNOTSUPP;
1894}
Eric Dumazet2a915252009-05-27 11:30:05 +00001895EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
1897int sock_no_listen(struct socket *sock, int backlog)
1898{
1899 return -EOPNOTSUPP;
1900}
Eric Dumazet2a915252009-05-27 11:30:05 +00001901EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902
1903int sock_no_shutdown(struct socket *sock, int how)
1904{
1905 return -EOPNOTSUPP;
1906}
Eric Dumazet2a915252009-05-27 11:30:05 +00001907EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
1909int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001910 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911{
1912 return -EOPNOTSUPP;
1913}
Eric Dumazet2a915252009-05-27 11:30:05 +00001914EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916int sock_no_getsockopt(struct socket *sock, int level, int optname,
1917 char __user *optval, int __user *optlen)
1918{
1919 return -EOPNOTSUPP;
1920}
Eric Dumazet2a915252009-05-27 11:30:05 +00001921EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922
1923int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1924 size_t len)
1925{
1926 return -EOPNOTSUPP;
1927}
Eric Dumazet2a915252009-05-27 11:30:05 +00001928EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
1930int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1931 size_t len, int flags)
1932{
1933 return -EOPNOTSUPP;
1934}
Eric Dumazet2a915252009-05-27 11:30:05 +00001935EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
1937int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1938{
1939 /* Mirror missing mmap method error code */
1940 return -ENODEV;
1941}
Eric Dumazet2a915252009-05-27 11:30:05 +00001942EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
1944ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1945{
1946 ssize_t res;
1947 struct msghdr msg = {.msg_flags = flags};
1948 struct kvec iov;
1949 char *kaddr = kmap(page);
1950 iov.iov_base = kaddr + offset;
1951 iov.iov_len = size;
1952 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1953 kunmap(page);
1954 return res;
1955}
Eric Dumazet2a915252009-05-27 11:30:05 +00001956EXPORT_SYMBOL(sock_no_sendpage);
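/*
 * Protocols wire these stubs into their struct proto_ops for the
 * operations they do not support; an abridged, hypothetical example:
 *
 *	static const struct proto_ops my_proto_ops = {
 *		.family		= PF_PACKET,
 *		.owner		= THIS_MODULE,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.mmap		= sock_no_mmap,
 *		...
 *	};
 */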
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
1958/*
1959 * Default Socket Callbacks
1960 */
1961
1962static void sock_def_wakeup(struct sock *sk)
1963{
Eric Dumazet43815482010-04-29 11:01:49 +00001964 struct socket_wq *wq;
1965
1966 rcu_read_lock();
1967 wq = rcu_dereference(sk->sk_wq);
1968 if (wq_has_sleeper(wq))
1969 wake_up_interruptible_all(&wq->wait);
1970 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971}
1972
1973static void sock_def_error_report(struct sock *sk)
1974{
Eric Dumazet43815482010-04-29 11:01:49 +00001975 struct socket_wq *wq;
1976
1977 rcu_read_lock();
1978 wq = rcu_dereference(sk->sk_wq);
1979 if (wq_has_sleeper(wq))
1980 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001981 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00001982 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983}
1984
1985static void sock_def_readable(struct sock *sk, int len)
1986{
Eric Dumazet43815482010-04-29 11:01:49 +00001987 struct socket_wq *wq;
1988
1989 rcu_read_lock();
1990 wq = rcu_dereference(sk->sk_wq);
1991 if (wq_has_sleeper(wq))
Eric Dumazet2c6607c2011-01-06 10:54:29 -08001992 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
Davide Libenzi37e55402009-03-31 15:24:21 -07001993 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001994 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00001995 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996}
1997
1998static void sock_def_write_space(struct sock *sk)
1999{
Eric Dumazet43815482010-04-29 11:01:49 +00002000 struct socket_wq *wq;
2001
2002 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
2004 /* Do not wake up a writer until he can make "significant"
2005 * progress. --DaveM
2006 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002007 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00002008 wq = rcu_dereference(sk->sk_wq);
2009 if (wq_has_sleeper(wq))
2010 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07002011 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012
2013 /* Should agree with poll, otherwise some programs break */
2014 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002015 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 }
2017
Eric Dumazet43815482010-04-29 11:01:49 +00002018 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019}
2020
2021static void sock_def_destruct(struct sock *sk)
2022{
Jesper Juhla51482b2005-11-08 09:41:34 -08002023 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024}
2025
2026void sk_send_sigurg(struct sock *sk)
2027{
2028 if (sk->sk_socket && sk->sk_socket->file)
2029 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08002030 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031}
Eric Dumazet2a915252009-05-27 11:30:05 +00002032EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
2034void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2035 unsigned long expires)
2036{
2037 if (!mod_timer(timer, expires))
2038 sock_hold(sk);
2039}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040EXPORT_SYMBOL(sk_reset_timer);
2041
2042void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2043{
2044 if (timer_pending(timer) && del_timer(timer))
2045 __sock_put(sk);
2046}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047EXPORT_SYMBOL(sk_stop_timer);
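/*
 * Sketched pairing: an armed timer holds a socket reference, dropped
 * either by sk_stop_timer() or by the expired handler via sock_put():
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);
 *
 * (delay is an assumed caller-side variable.)
 */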
2048
2049void sock_init_data(struct socket *sock, struct sock *sk)
2050{
2051 skb_queue_head_init(&sk->sk_receive_queue);
2052 skb_queue_head_init(&sk->sk_write_queue);
2053 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07002054#ifdef CONFIG_NET_DMA
2055 skb_queue_head_init(&sk->sk_async_wait_queue);
2056#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058 sk->sk_send_head = NULL;
2059
2060 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002061
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 sk->sk_allocation = GFP_KERNEL;
2063 sk->sk_rcvbuf = sysctl_rmem_default;
2064 sk->sk_sndbuf = sysctl_wmem_default;
2065 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07002066 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
2068 sock_set_flag(sk, SOCK_ZAPPED);
2069
Stephen Hemmingere71a4782007-04-10 20:10:33 -07002070 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00002072 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 sock->sk = sk;
2074 } else
Eric Dumazet43815482010-04-29 11:01:49 +00002075 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
Eric Dumazetb6c67122010-04-08 23:03:29 +00002077 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07002079 lockdep_set_class_and_name(&sk->sk_callback_lock,
2080 af_callback_keys + sk->sk_family,
2081 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
2083 sk->sk_state_change = sock_def_wakeup;
2084 sk->sk_data_ready = sock_def_readable;
2085 sk->sk_write_space = sock_def_write_space;
2086 sk->sk_error_report = sock_def_error_report;
2087 sk->sk_destruct = sock_def_destruct;
2088
2089 sk->sk_sndmsg_page = NULL;
2090 sk->sk_sndmsg_off = 0;
2091
Eric W. Biederman109f6e32010-06-13 03:30:14 +00002092 sk->sk_peer_pid = NULL;
2093 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 sk->sk_write_pending = 0;
2095 sk->sk_rcvlowat = 1;
2096 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2097 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2098
Eric Dumazetf37f0af2008-04-13 21:39:26 -07002099 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00002101 /*
2102 * Before updating sk_refcnt, we must commit prior changes to memory
2103 * (Documentation/RCU/rculist_nulls.txt for details)
2104 */
2105 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08002107 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108}
Eric Dumazet2a915252009-05-27 11:30:05 +00002109EXPORT_SYMBOL(sock_init_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002111void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112{
2113 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002114 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002115 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002117 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002118 spin_unlock(&sk->sk_lock.slock);
2119 /*
2120 * The sk_lock has mutex_lock() semantics here:
2121 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002122 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002123 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002125EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002127void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002129 /*
2130 * The sk_lock has mutex_unlock() semantics:
2131 */
2132 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2133
2134 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 if (sk->sk_backlog.tail)
2136 __release_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002137 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002138 if (waitqueue_active(&sk->sk_lock.wq))
2139 wake_up(&sk->sk_lock.wq);
2140 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141}
2142EXPORT_SYMBOL(release_sock);
2143
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002144/**
2145 * lock_sock_fast - fast version of lock_sock
2146 * @sk: socket
2147 *
2148 * This version should be used for very small sections where the process
2149 * won't block. Returns false if the fast path is taken:
2150 *   sk_lock.slock locked, owned = 0, BH disabled.
2151 * Returns true if the slow path is taken:
2152 *   sk_lock.slock unlocked, owned = 1, BH enabled.
2153 */
2154bool lock_sock_fast(struct sock *sk)
2155{
2156 might_sleep();
2157 spin_lock_bh(&sk->sk_lock.slock);
2158
2159 if (!sk->sk_lock.owned)
2160 /*
2161		 * Note: we return with BH still disabled,
		 * to be re-enabled by unlock_sock_fast()
2162 */
2163 return false;
2164
2165 __lock_sock(sk);
2166 sk->sk_lock.owned = 1;
2167 spin_unlock(&sk->sk_lock.slock);
2168 /*
2169 * The sk_lock has mutex_lock() semantics here:
2170 */
2171 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2172 local_bh_enable();
2173 return true;
2174}
2175EXPORT_SYMBOL(lock_sock_fast);
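/*
 * Sketch of the intended pairing with unlock_sock_fast() from
 * include/net/sock.h:
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 */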
2176
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002178{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002179 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002181 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002182 tv = ktime_to_timeval(sk->sk_stamp);
2183 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002185 if (tv.tv_sec == 0) {
2186 sk->sk_stamp = ktime_get_real();
2187 tv = ktime_to_timeval(sk->sk_stamp);
2188 }
2189 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002190}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191EXPORT_SYMBOL(sock_get_timestamp);
2192
Eric Dumazetae40eb12007-03-18 17:33:16 -07002193int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2194{
2195 struct timespec ts;
2196 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002197 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002198 ts = ktime_to_timespec(sk->sk_stamp);
2199 if (ts.tv_sec == -1)
2200 return -ENOENT;
2201 if (ts.tv_sec == 0) {
2202 sk->sk_stamp = ktime_get_real();
2203 ts = ktime_to_timespec(sk->sk_stamp);
2204 }
2205 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2206}
2207EXPORT_SYMBOL(sock_get_timestampns);
2208
Patrick Ohly20d49472009-02-12 05:03:38 +00002209void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002210{
Patrick Ohly20d49472009-02-12 05:03:38 +00002211 if (!sock_flag(sk, flag)) {
Eric Dumazet08e29af2011-11-28 12:04:18 +00002212 unsigned long previous_flags = sk->sk_flags;
2213
Patrick Ohly20d49472009-02-12 05:03:38 +00002214 sock_set_flag(sk, flag);
2215 /*
2216 * we just set one of the two flags which require net
2217 * time stamping, but time stamping might have been on
2218 * already because of the other one
2219 */
Eric Dumazet08e29af2011-11-28 12:04:18 +00002220 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002221 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 }
2223}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
2225/*
2226 * Get a socket option on a socket.
2227 *
2228 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2229 * asynchronous errors should be reported by getsockopt. We assume
2230 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2231 */
2232int sock_common_getsockopt(struct socket *sock, int level, int optname,
2233 char __user *optval, int __user *optlen)
2234{
2235 struct sock *sk = sock->sk;
2236
2237 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2238}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239EXPORT_SYMBOL(sock_common_getsockopt);
2240
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002241#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002242int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2243 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002244{
2245 struct sock *sk = sock->sk;
2246
Johannes Berg1e51f952007-03-06 13:44:06 -08002247 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002248 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2249 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002250 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2251}
2252EXPORT_SYMBOL(compat_sock_common_getsockopt);
2253#endif
2254
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2256 struct msghdr *msg, size_t size, int flags)
2257{
2258 struct sock *sk = sock->sk;
2259 int addr_len = 0;
2260 int err;
2261
2262 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2263 flags & ~MSG_DONTWAIT, &addr_len);
2264 if (err >= 0)
2265 msg->msg_namelen = addr_len;
2266 return err;
2267}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268EXPORT_SYMBOL(sock_common_recvmsg);
2269
2270/*
2271 * Set socket options on an inet socket.
2272 */
2273int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002274 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275{
2276 struct sock *sk = sock->sk;
2277
2278 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2279}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280EXPORT_SYMBOL(sock_common_setsockopt);
2281
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002282#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002283int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002284 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002285{
2286 struct sock *sk = sock->sk;
2287
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002288 if (sk->sk_prot->compat_setsockopt != NULL)
2289 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2290 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002291 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2292}
2293EXPORT_SYMBOL(compat_sock_common_setsockopt);
2294#endif
2295
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296void sk_common_release(struct sock *sk)
2297{
2298 if (sk->sk_prot->destroy)
2299 sk->sk_prot->destroy(sk);
2300
2301 /*
2302	 * Observation: when sock_common_release is called, processes no
2303	 * longer have access to the socket, but the net still does.
2304 * Step one, detach it from networking:
2305 *
2306 * A. Remove from hash tables.
2307 */
2308
2309 sk->sk_prot->unhash(sk);
2310
2311 /*
2312	 * At this point the socket cannot receive new packets, but it is
2313	 * possible that some packets are in flight because some CPU ran the
2314	 * receiver and did a hash table lookup before we unhashed the socket.
2315	 * They will reach the receive queue and be purged by the socket destructor.
2316 *
2317	 * Also, we still have packets pending on the receive queue and
2318	 * probably our own packets waiting in device queues. sock_destroy
2319	 * will drain the receive queue, but transmitted packets will delay
2320	 * socket destruction until the last reference is released.
2321 */
2322
2323 sock_orphan(sk);
2324
2325 xfrm_sk_free_policy(sk);
2326
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002327 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 sock_put(sk);
2329}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330EXPORT_SYMBOL(sk_common_release);
2331
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002332#ifdef CONFIG_PROC_FS
2333#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002334struct prot_inuse {
2335 int val[PROTO_INUSE_NR];
2336};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002337
2338static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002339
2340#ifdef CONFIG_NET_NS
2341void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2342{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002343 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002344}
2345EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2346
2347int sock_prot_inuse_get(struct net *net, struct proto *prot)
2348{
2349 int cpu, idx = prot->inuse_idx;
2350 int res = 0;
2351
2352 for_each_possible_cpu(cpu)
2353 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2354
2355 return res >= 0 ? res : 0;
2356}
2357EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2358
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002359static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002360{
2361 net->core.inuse = alloc_percpu(struct prot_inuse);
2362 return net->core.inuse ? 0 : -ENOMEM;
2363}
2364
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002365static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002366{
2367 free_percpu(net->core.inuse);
2368}
2369
2370static struct pernet_operations net_inuse_ops = {
2371 .init = sock_inuse_init_net,
2372 .exit = sock_inuse_exit_net,
2373};
2374
2375static __init int net_inuse_init(void)
2376{
2377 if (register_pernet_subsys(&net_inuse_ops))
2378 panic("Cannot initialize net inuse counters");
2379
2380 return 0;
2381}
2382
2383core_initcall(net_inuse_init);
2384#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002385static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2386
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002387void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002388{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002389 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002390}
2391EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2392
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002393int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002394{
2395 int cpu, idx = prot->inuse_idx;
2396 int res = 0;
2397
2398 for_each_possible_cpu(cpu)
2399 res += per_cpu(prot_inuse, cpu).val[idx];
2400
2401 return res >= 0 ? res : 0;
2402}
2403EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002404#endif
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002405
2406static void assign_proto_idx(struct proto *prot)
2407{
2408 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2409
2410 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2411 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2412 return;
2413 }
2414
2415 set_bit(prot->inuse_idx, proto_inuse_idx);
2416}
2417
2418static void release_proto_idx(struct proto *prot)
2419{
2420 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2421 clear_bit(prot->inuse_idx, proto_inuse_idx);
2422}
2423#else
2424static inline void assign_proto_idx(struct proto *prot)
2425{
2426}
2427
2428static inline void release_proto_idx(struct proto *prot)
2429{
2430}
2431#endif
2432
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433int proto_register(struct proto *prot, int alloc_slab)
2434{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 if (alloc_slab) {
2436 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002437 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2438 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439
2440 if (prot->slab == NULL) {
2441 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2442 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002443 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002445
2446 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002447 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002448 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002449 goto out_free_sock_slab;
2450
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002451 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002452 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002453 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002454
2455 if (prot->rsk_prot->slab == NULL) {
2456 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2457 prot->name);
2458 goto out_free_request_sock_slab_name;
2459 }
2460 }
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002461
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002462 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002463 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002464
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002465 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002466 goto out_free_request_sock_slab;
2467
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002468 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002469 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002470 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002471 0,
2472 SLAB_HWCACHE_ALIGN |
2473 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002474 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002475 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002476 goto out_free_timewait_sock_slab_name;
2477 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 }
2479
Glauber Costa36b77a52011-12-16 00:51:59 +00002480 mutex_lock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002482 assign_proto_idx(prot);
Glauber Costa36b77a52011-12-16 00:51:59 +00002483 mutex_unlock(&proto_list_mutex);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002484 return 0;
2485
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002486out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002487 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002488out_free_request_sock_slab:
2489 if (prot->rsk_prot && prot->rsk_prot->slab) {
2490 kmem_cache_destroy(prot->rsk_prot->slab);
2491 prot->rsk_prot->slab = NULL;
2492 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002493out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002494 if (prot->rsk_prot)
2495 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002496out_free_sock_slab:
2497 kmem_cache_destroy(prot->slab);
2498 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002499out:
2500 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502EXPORT_SYMBOL(proto_register);
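/*
 * Hypothetical registration sketch (all names illustrative); passing
 * alloc_slab == 1 asks for a kmem cache of obj_size bytes per socket:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *	...
 *	proto_unregister(&my_proto);
 */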
2503
2504void proto_unregister(struct proto *prot)
2505{
Glauber Costa36b77a52011-12-16 00:51:59 +00002506 mutex_lock(&proto_list_mutex);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002507 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002508 list_del(&prot->node);
Glauber Costa36b77a52011-12-16 00:51:59 +00002509 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510
2511 if (prot->slab != NULL) {
2512 kmem_cache_destroy(prot->slab);
2513 prot->slab = NULL;
2514 }
2515
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002516 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002517 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002518 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002519 prot->rsk_prot->slab = NULL;
2520 }
2521
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002522 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002523 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002524 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002525 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c2005-08-09 20:09:30 -07002526 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528EXPORT_SYMBOL(proto_unregister);
2529
2530#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Glauber Costa36b77a52011-12-16 00:51:59 +00002532 __acquires(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533{
Glauber Costa36b77a52011-12-16 00:51:59 +00002534 mutex_lock(&proto_list_mutex);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002535 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536}
2537
2538static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2539{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002540 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541}
2542
2543static void proto_seq_stop(struct seq_file *seq, void *v)
Glauber Costa36b77a52011-12-16 00:51:59 +00002544 __releases(proto_list_mutex)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545{
Glauber Costa36b77a52011-12-16 00:51:59 +00002546 mutex_unlock(&proto_list_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547}
2548
2549static char proto_method_implemented(const void *method)
2550{
2551 return method == NULL ? 'n' : 'y';
2552}
Glauber Costa180d8cd2011-12-11 21:47:02 +00002553static long sock_prot_memory_allocated(struct proto *proto)
2554{
2555 return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
2556}
2557
2558static char *sock_prot_memory_pressure(struct proto *proto)
2559{
2560 return proto->memory_pressure != NULL ?
2561 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2562}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563
2564static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2565{
Glauber Costa180d8cd2011-12-11 21:47:02 +00002566
Eric Dumazet8d987e52010-11-09 23:24:26 +00002567 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2569 proto->name,
2570 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002571 sock_prot_inuse_get(seq_file_net(seq), proto),
Glauber Costa180d8cd2011-12-11 21:47:02 +00002572 sock_prot_memory_allocated(proto),
2573 sock_prot_memory_pressure(proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 proto->max_header,
2575 proto->slab == NULL ? "no" : "yes",
2576 module_name(proto->owner),
2577 proto_method_implemented(proto->close),
2578 proto_method_implemented(proto->connect),
2579 proto_method_implemented(proto->disconnect),
2580 proto_method_implemented(proto->accept),
2581 proto_method_implemented(proto->ioctl),
2582 proto_method_implemented(proto->init),
2583 proto_method_implemented(proto->destroy),
2584 proto_method_implemented(proto->shutdown),
2585 proto_method_implemented(proto->setsockopt),
2586 proto_method_implemented(proto->getsockopt),
2587 proto_method_implemented(proto->sendmsg),
2588 proto_method_implemented(proto->recvmsg),
2589 proto_method_implemented(proto->sendpage),
2590 proto_method_implemented(proto->bind),
2591 proto_method_implemented(proto->backlog_rcv),
2592 proto_method_implemented(proto->hash),
2593 proto_method_implemented(proto->unhash),
2594 proto_method_implemented(proto->get_port),
2595 proto_method_implemented(proto->enter_memory_pressure));
2596}
2597
2598static int proto_seq_show(struct seq_file *seq, void *v)
2599{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002600 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2602 "protocol",
2603 "size",
2604 "sockets",
2605 "memory",
2606 "press",
2607 "maxhdr",
2608 "slab",
2609 "module",
2610 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2611 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002612 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 return 0;
2614}
2615
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002616static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 .start = proto_seq_start,
2618 .next = proto_seq_next,
2619 .stop = proto_seq_stop,
2620 .show = proto_seq_show,
2621};
2622
2623static int proto_seq_open(struct inode *inode, struct file *file)
2624{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002625 return seq_open_net(inode, file, &proto_seq_ops,
2626 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627}
2628
Arjan van de Ven9a321442007-02-12 00:55:35 -08002629static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 .owner = THIS_MODULE,
2631 .open = proto_seq_open,
2632 .read = seq_read,
2633 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002634 .release = seq_release_net,
2635};
2636
2637static __net_init int proto_init_net(struct net *net)
2638{
2639 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2640 return -ENOMEM;
2641
2642 return 0;
2643}
2644
2645static __net_exit void proto_exit_net(struct net *net)
2646{
2647 proc_net_remove(net, "protocols");
2648}
2649
2650
2651static __net_initdata struct pernet_operations proto_net_ops = {
2652 .init = proto_init_net,
2653 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654};
2655
2656static int __init proto_init(void)
2657{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002658 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659}
2660
2661subsys_initcall(proto_init);
2662
2663#endif /* PROC_FS */