/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
  "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
  "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
  "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
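
/* Rough worked example (illustrative only, since sizeof(struct sk_buff) is
 * platform dependent): with an sk_buff of roughly 256 bytes, each queued
 * packet is charged about 256 + 256 = 512 bytes of overhead, so the
 * defaults above come out to roughly 256 * 512 = 128 KB for both
 * SK_WMEM_MAX and SK_RMEM_MAX.
 */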

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
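
/* Example of the conversion above, assuming HZ = 1000 (illustrative only):
 * a user timeval of { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2*1000 + (500000 + 999)/1000 = 2500 jiffies, i.e. the fractional part
 * is rounded up to the next tick.
 */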

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* We escape from the rcu protected region, so make sure we don't
	 * leak a non-refcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
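
/* Sketch of typical use from a protocol's input path (illustrative only):
 *
 *	err = sock_queue_rcv_skb(sk, skb);
 *	if (err < 0)
 *		kfree_skb(skb);		(the helper does not consume the skb on error)
 *
 * On success the skb is charged against sk_rcvbuf, queued on
 * sk_receive_queue and the reader is woken through sk->sk_data_ready().
 */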

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);
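
/* Note on the locking above: if the socket is currently owned by a process
 * (sock_owned_by_user()), the skb is parked on the backlog and processed
 * later by release_sock(); otherwise it is fed straight into
 * sk_backlog_rcv(). The mutex_acquire()/mutex_release() pair only informs
 * lockdep, so the "trylock + unlock" shape is validated like lock_sock().
 */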

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		rcu_assign_pointer(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
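
/* Userspace reaches this through setsockopt(); a minimal sketch
 * (error handling omitted, "eth0" is just an example name):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *
 * Passing an empty name (or a zero option length) clears
 * sk_bound_dev_if again.
 */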

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't return an error here; BSD doesn't, and if you
		   think about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't return an error here; BSD doesn't, and if you
		   think about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;
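	/* Worked example of the doubling above (illustrative): an
	 * application that sets SO_RCVBUF to 64 KB ends up with
	 * sk_rcvbuf = 128 KB, and a later getsockopt(SO_RCVBUF)
	 * reports 131072, the value actually used.
	 */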

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);
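
/* Note: the pid is translated with pid_vnr() into the caller's pid
 * namespace and the uid/gid through the current user namespace; when no
 * struct cred is attached, uid and gid are left as -1.
 */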

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
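
/* Sketch of the userspace side (illustrative only): reading back a
 * receive timeout previously set with SO_RCVTIMEO:
 *
 *	struct timeval tv;
 *	socklen_t len = sizeof(tv);
 *	getsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, &len);
 *
 * As in the SO_RCVTIMEO case above, a timeout of MAX_SCHEDULE_TIMEOUT
 * (i.e. "no timeout") is reported back as {0, 0}.
 */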

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * Caches using SLAB_DESTROY_BY_RCU should leave
			 * sk_node.next unmodified. Special care is taken
			 * when initializing the object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid = task_cls_classid(current);

	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
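
/* Sketch of how a protocol typically pairs this with its proto structure
 * (illustrative only, loosely mirroring the PF_INET create path):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &tcp_prot);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * The __GFP_ZERO added above means the caller gets a zeroed sock, with
 * sk_wmem_alloc preset to 1 (see sk_free()/sock_wfree() below).
 */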

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
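
/* Design note: sk_wmem_alloc doubles as a "may this sock be freed"
 * reference. sk_alloc() starts it at 1, sk_free() drops that 1, and the
 * real __sk_free() only runs once the last in-flight transmit skb has
 * released its share in sock_wfree() (see below).
 */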

/*
 * Last sock_put should drop the reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to the stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
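
/* In short: on a GSO-capable route the socket may advertise scatter-gather
 * and hardware checksumming and adopt the device's gso_max_size, unless
 * the dst needs extra header room (dst->header_len, e.g. for transforms),
 * in which case the GSO feature bits are masked out again.
 */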

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}
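
/* Rough numbers (assuming 4 KB pages, for illustration): <= 4096 pages is
 * about 16 MB of RAM, where the buffer limits drop to ~32 KB; >= 131072
 * pages is about 512 MB or more, where the maxima grow to ~128 KB; systems
 * in between keep the compile-time SK_WMEM_MAX/SK_RMEM_MAX defaults.
 */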
1304
1305/*
1306 * Simple resource managers for sockets.
1307 */
1308
1309
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001310/*
1311 * Write buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 */
1313void sock_wfree(struct sk_buff *skb)
1314{
1315 struct sock *sk = skb->sk;
Eric Dumazetd99927f2009-09-24 10:49:24 +00001316 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
Eric Dumazetd99927f2009-09-24 10:49:24 +00001318 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1319 /*
1320		 * Keep a reference on sk_wmem_alloc; it will be released
1321		 * after the sk_write_space() call
1322 */
1323 atomic_sub(len - 1, &sk->sk_wmem_alloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 sk->sk_write_space(sk);
Eric Dumazetd99927f2009-09-24 10:49:24 +00001325 len = 1;
1326 }
Eric Dumazet2b85a342009-06-11 02:55:43 -07001327 /*
Eric Dumazetd99927f2009-09-24 10:49:24 +00001328 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1329 * could not do because of in-flight packets
Eric Dumazet2b85a342009-06-11 02:55:43 -07001330 */
Eric Dumazetd99927f2009-09-24 10:49:24 +00001331 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
Eric Dumazet2b85a342009-06-11 02:55:43 -07001332 __sk_free(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333}
Eric Dumazet2a915252009-05-27 11:30:05 +00001334EXPORT_SYMBOL(sock_wfree);
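
/*
 * Sketch of how this destructor gets attached (illustrative, not code from
 * this file): skb_set_owner_w() charges the skb to the socket's write
 * allocation and installs sock_wfree() as the destructor, so the charge is
 * undone when the skb is finally freed:
 *
 *	skb = alloc_skb(size, sk->sk_allocation);
 *	if (skb)
 *		skb_set_owner_w(skb, sk);	(destructor = sock_wfree)
 */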
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001336/*
1337 * Read buffer destructor automatically called from kfree_skb.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 */
1339void sock_rfree(struct sk_buff *skb)
1340{
1341 struct sock *sk = skb->sk;
Eric Dumazetd361fd52010-07-10 22:45:17 +00001342 unsigned int len = skb->truesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Eric Dumazetd361fd52010-07-10 22:45:17 +00001344 atomic_sub(len, &sk->sk_rmem_alloc);
1345 sk_mem_uncharge(sk, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346}
Eric Dumazet2a915252009-05-27 11:30:05 +00001347EXPORT_SYMBOL(sock_rfree);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348
1349
1350int sock_i_uid(struct sock *sk)
1351{
1352 int uid;
1353
Eric Dumazetf064af12010-09-22 12:43:39 +00001354 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001356 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 return uid;
1358}
Eric Dumazet2a915252009-05-27 11:30:05 +00001359EXPORT_SYMBOL(sock_i_uid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
1361unsigned long sock_i_ino(struct sock *sk)
1362{
1363 unsigned long ino;
1364
Eric Dumazetf064af12010-09-22 12:43:39 +00001365 read_lock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
Eric Dumazetf064af12010-09-22 12:43:39 +00001367 read_unlock_bh(&sk->sk_callback_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 return ino;
1369}
Eric Dumazet2a915252009-05-27 11:30:05 +00001370EXPORT_SYMBOL(sock_i_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
1372/*
1373 * Allocate a skb from the socket's send buffer.
1374 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001375struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001376 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377{
1378 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Eric Dumazet2a915252009-05-27 11:30:05 +00001379 struct sk_buff *skb = alloc_skb(size, priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 if (skb) {
1381 skb_set_owner_w(skb, sk);
1382 return skb;
1383 }
1384 }
1385 return NULL;
1386}
Eric Dumazet2a915252009-05-27 11:30:05 +00001387EXPORT_SYMBOL(sock_wmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
1389/*
1390 * Allocate a skb from the socket's receive buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001391 */
Victor Fusco86a76ca2005-07-08 14:57:47 -07001392struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
Al Virodd0fc662005-10-07 07:46:04 +01001393 gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394{
1395 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
1396 struct sk_buff *skb = alloc_skb(size, priority);
1397 if (skb) {
1398 skb_set_owner_r(skb, sk);
1399 return skb;
1400 }
1401 }
1402 return NULL;
1403}
1404
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001405/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 * Allocate a memory block from the socket's option memory buffer.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001407 */
Al Virodd0fc662005-10-07 07:46:04 +01001408void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409{
1410 if ((unsigned)size <= sysctl_optmem_max &&
1411 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1412 void *mem;
1413 /* First do the add, to avoid the race if kmalloc
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001414 * might sleep.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 */
1416 atomic_add(size, &sk->sk_omem_alloc);
1417 mem = kmalloc(size, priority);
1418 if (mem)
1419 return mem;
1420 atomic_sub(size, &sk->sk_omem_alloc);
1421 }
1422 return NULL;
1423}
Eric Dumazet2a915252009-05-27 11:30:05 +00001424EXPORT_SYMBOL(sock_kmalloc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425
1426/*
1427 * Free an option memory block.
1428 */
1429void sock_kfree_s(struct sock *sk, void *mem, int size)
1430{
1431 kfree(mem);
1432 atomic_sub(size, &sk->sk_omem_alloc);
1433}
Eric Dumazet2a915252009-05-27 11:30:05 +00001434EXPORT_SYMBOL(sock_kfree_s);
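
/*
 * Illustrative usage (a sketch, not from this file): option memory must be
 * released with sock_kfree_s() using the same size that was passed to
 * sock_kmalloc(), so sk_omem_alloc stays balanced.  A setsockopt() handler
 * copying a blob from user space might look like:
 *
 *	void *opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	if (copy_from_user(opt, optval, optlen)) {
 *		sock_kfree_s(sk, opt, optlen);
 *		return -EFAULT;
 *	}
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 */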
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435
1436/* This is almost wait_for_tcp_memory minus release_sock/lock_sock.
1437   I think these locks should be removed for datagram sockets.
1438 */
Eric Dumazet2a915252009-05-27 11:30:05 +00001439static long sock_wait_for_wmem(struct sock *sk, long timeo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440{
1441 DEFINE_WAIT(wait);
1442
1443 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1444 for (;;) {
1445 if (!timeo)
1446 break;
1447 if (signal_pending(current))
1448 break;
1449 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001450 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1452 break;
1453 if (sk->sk_shutdown & SEND_SHUTDOWN)
1454 break;
1455 if (sk->sk_err)
1456 break;
1457 timeo = schedule_timeout(timeo);
1458 }
Eric Dumazetaa395142010-04-20 13:03:51 +00001459 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 return timeo;
1461}
1462
1463
1464/*
1465 * Generic send/receive buffer handlers
1466 */
1467
Herbert Xu4cc7f682009-02-04 16:55:54 -08001468struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1469 unsigned long data_len, int noblock,
1470 int *errcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
1472 struct sk_buff *skb;
Al Viro7d877f32005-10-21 03:20:43 -04001473 gfp_t gfp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 long timeo;
1475 int err;
1476
1477 gfp_mask = sk->sk_allocation;
1478 if (gfp_mask & __GFP_WAIT)
1479 gfp_mask |= __GFP_REPEAT;
1480
1481 timeo = sock_sndtimeo(sk, noblock);
1482 while (1) {
1483 err = sock_error(sk);
1484 if (err != 0)
1485 goto failure;
1486
1487 err = -EPIPE;
1488 if (sk->sk_shutdown & SEND_SHUTDOWN)
1489 goto failure;
1490
1491 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
Larry Woodmandb38c1792006-11-03 16:05:45 -08001492 skb = alloc_skb(header_len, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 if (skb) {
1494 int npages;
1495 int i;
1496
1497 /* No pages, we're done... */
1498 if (!data_len)
1499 break;
1500
1501 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1502 skb->truesize += data_len;
1503 skb_shinfo(skb)->nr_frags = npages;
1504 for (i = 0; i < npages; i++) {
1505 struct page *page;
1506 skb_frag_t *frag;
1507
1508 page = alloc_pages(sk->sk_allocation, 0);
1509 if (!page) {
1510 err = -ENOBUFS;
1511 skb_shinfo(skb)->nr_frags = i;
1512 kfree_skb(skb);
1513 goto failure;
1514 }
1515
1516 frag = &skb_shinfo(skb)->frags[i];
1517 frag->page = page;
1518 frag->page_offset = 0;
1519 frag->size = (data_len >= PAGE_SIZE ?
1520 PAGE_SIZE :
1521 data_len);
1522 data_len -= PAGE_SIZE;
1523 }
1524
1525 /* Full success... */
1526 break;
1527 }
1528 err = -ENOBUFS;
1529 goto failure;
1530 }
1531 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1532 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1533 err = -EAGAIN;
1534 if (!timeo)
1535 goto failure;
1536 if (signal_pending(current))
1537 goto interrupted;
1538 timeo = sock_wait_for_wmem(sk, timeo);
1539 }
1540
1541 skb_set_owner_w(skb, sk);
1542 return skb;
1543
1544interrupted:
1545 err = sock_intr_errno(timeo);
1546failure:
1547 *errcode = err;
1548 return NULL;
1549}
Herbert Xu4cc7f682009-02-04 16:55:54 -08001550EXPORT_SYMBOL(sock_alloc_send_pskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001552struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 int noblock, int *errcode)
1554{
1555 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
1556}
Eric Dumazet2a915252009-05-27 11:30:05 +00001557EXPORT_SYMBOL(sock_alloc_send_skb);
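
/*
 * Illustrative sketch (not part of this file): a datagram sendmsg()
 * implementation typically lets sock_alloc_send_skb() do the send-buffer
 * accounting and blocking for it.  "hlen" and "len" are assumed names
 * for the header and payload lengths:
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;	(err already holds the reason)
 *	skb_reserve(skb, hlen);
 *	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 */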
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
1559static void __lock_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001560 __releases(&sk->sk_lock.slock)
1561 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562{
1563 DEFINE_WAIT(wait);
1564
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001565 for (;;) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1567 TASK_UNINTERRUPTIBLE);
1568 spin_unlock_bh(&sk->sk_lock.slock);
1569 schedule();
1570 spin_lock_bh(&sk->sk_lock.slock);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001571 if (!sock_owned_by_user(sk))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 break;
1573 }
1574 finish_wait(&sk->sk_lock.wq, &wait);
1575}
1576
1577static void __release_sock(struct sock *sk)
Namhyung Kimf39234d2010-09-08 03:48:48 +00001578 __releases(&sk->sk_lock.slock)
1579 __acquires(&sk->sk_lock.slock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580{
1581 struct sk_buff *skb = sk->sk_backlog.head;
1582
1583 do {
1584 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
1585 bh_unlock_sock(sk);
1586
1587 do {
1588 struct sk_buff *next = skb->next;
1589
Eric Dumazet7fee2262010-05-11 23:19:48 +00001590 WARN_ON_ONCE(skb_dst_is_noref(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 skb->next = NULL;
Peter Zijlstrac57943a2008-10-07 14:18:42 -07001592 sk_backlog_rcv(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593
1594 /*
1595			 * We are in process context here with softirqs
1596			 * disabled; use cond_resched_softirq() to preempt.
1597			 * This is safe to do because we've made the backlog
1598			 * queue private:
1599 */
1600 cond_resched_softirq();
1601
1602 skb = next;
1603 } while (skb != NULL);
1604
1605 bh_lock_sock(sk);
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001606 } while ((skb = sk->sk_backlog.head) != NULL);
Zhu Yi8eae9392010-03-04 18:01:40 +00001607
1608 /*
1609	 * Doing the zeroing here guarantees we cannot loop forever
1610	 * while a wild producer attempts to flood us.
1611 */
1612 sk->sk_backlog.len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613}
1614
1615/**
1616 * sk_wait_data - wait for data to arrive at sk_receive_queue
Pavel Pisa4dc3b162005-05-01 08:59:25 -07001617 * @sk: sock to wait on
1618 * @timeo: for how long
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 *
1620 * Now socket state including sk->sk_err is changed only under the lock,
1621 * hence we may omit checks after joining the wait queue.
1622 * We check the receive queue before schedule() only as an optimization;
1623 * it is very likely that release_sock() added new data.
1624 */
1625int sk_wait_data(struct sock *sk, long *timeo)
1626{
1627 int rc;
1628 DEFINE_WAIT(wait);
1629
Eric Dumazetaa395142010-04-20 13:03:51 +00001630 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1632 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1633 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
Eric Dumazetaa395142010-04-20 13:03:51 +00001634 finish_wait(sk_sleep(sk), &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 return rc;
1636}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637EXPORT_SYMBOL(sk_wait_data);
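
/*
 * Sketch of the intended calling pattern (assumptions, not code from this
 * file): sk_wait_data() is called with the socket lock held, typically from
 * a recvmsg() loop that rechecks the receive queue after waking up:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	lock_sock(sk);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 *	release_sock(sk);
 */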
1638
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001639/**
1640 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1641 * @sk: socket
1642 * @size: memory size to allocate
1643 * @kind: allocation type
1644 *
1645 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1646 * rmem allocation. This function assumes that protocols which have
1647 * memory_pressure use sk_wmem_queued as write buffer accounting.
1648 */
1649int __sk_mem_schedule(struct sock *sk, int size, int kind)
1650{
1651 struct proto *prot = sk->sk_prot;
1652 int amt = sk_mem_pages(size);
1653 int allocated;
1654
1655 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1656 allocated = atomic_add_return(amt, prot->memory_allocated);
1657
1658 /* Under limit. */
1659 if (allocated <= prot->sysctl_mem[0]) {
1660 if (prot->memory_pressure && *prot->memory_pressure)
1661 *prot->memory_pressure = 0;
1662 return 1;
1663 }
1664
1665 /* Under pressure. */
1666 if (allocated > prot->sysctl_mem[1])
1667 if (prot->enter_memory_pressure)
Pavel Emelyanov5c52ba12008-07-16 20:28:10 -07001668 prot->enter_memory_pressure(sk);
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001669
1670 /* Over hard limit. */
1671 if (allocated > prot->sysctl_mem[2])
1672 goto suppress_allocation;
1673
1674 /* guarantee minimum buffer size under pressure */
1675 if (kind == SK_MEM_RECV) {
1676 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1677 return 1;
1678 } else { /* SK_MEM_SEND */
1679 if (sk->sk_type == SOCK_STREAM) {
1680 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1681 return 1;
1682 } else if (atomic_read(&sk->sk_wmem_alloc) <
1683 prot->sysctl_wmem[0])
1684 return 1;
1685 }
1686
1687 if (prot->memory_pressure) {
Eric Dumazet17483762008-11-25 21:16:35 -08001688 int alloc;
1689
1690 if (!*prot->memory_pressure)
1691 return 1;
1692 alloc = percpu_counter_read_positive(prot->sockets_allocated);
1693 if (prot->sysctl_mem[2] > alloc *
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001694 sk_mem_pages(sk->sk_wmem_queued +
1695 atomic_read(&sk->sk_rmem_alloc) +
1696 sk->sk_forward_alloc))
1697 return 1;
1698 }
1699
1700suppress_allocation:
1701
1702 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
1703 sk_stream_moderate_sndbuf(sk);
1704
1705 /* Fail only if socket is _under_ its sndbuf.
1706		 * In this case we cannot block, so we have to fail.
1707		 */
1708 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
1709 return 1;
1710 }
1711
1712 /* Alas. Undo changes. */
1713 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1714 atomic_sub(amt, prot->memory_allocated);
1715 return 0;
1716}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001717EXPORT_SYMBOL(__sk_mem_schedule);
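
/*
 * Illustrative sketch (not from this file): protocols normally reach this
 * through the sk_wmem_schedule()/sk_rmem_schedule() wrappers in
 * include/net/sock.h, charging forward-allocated memory before queueing:
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		return -ENOBUFS;	(global memory limits hit)
 *	sk_mem_charge(sk, skb->truesize);
 */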
1718
1719/**
1720 * __sk_mem_reclaim - reclaim memory_allocated
1721 * @sk: socket
1722 */
1723void __sk_mem_reclaim(struct sock *sk)
1724{
1725 struct proto *prot = sk->sk_prot;
1726
Eric Dumazet680a5a52007-12-31 15:00:50 -08001727 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001728 prot->memory_allocated);
1729 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1730
1731 if (prot->memory_pressure && *prot->memory_pressure &&
1732 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1733 *prot->memory_pressure = 0;
1734}
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001735EXPORT_SYMBOL(__sk_mem_reclaim);
1736
1737
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738/*
1739 * Set of default routines for initialising struct proto_ops when
1740 * the protocol does not support a particular function. In certain
1741 * cases where it makes no sense for a protocol to have a "do nothing"
1742 * function, some default processing is provided.
1743 */
1744
1745int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1746{
1747 return -EOPNOTSUPP;
1748}
Eric Dumazet2a915252009-05-27 11:30:05 +00001749EXPORT_SYMBOL(sock_no_bind);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001751int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 int len, int flags)
1753{
1754 return -EOPNOTSUPP;
1755}
Eric Dumazet2a915252009-05-27 11:30:05 +00001756EXPORT_SYMBOL(sock_no_connect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757
1758int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1759{
1760 return -EOPNOTSUPP;
1761}
Eric Dumazet2a915252009-05-27 11:30:05 +00001762EXPORT_SYMBOL(sock_no_socketpair);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763
1764int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1765{
1766 return -EOPNOTSUPP;
1767}
Eric Dumazet2a915252009-05-27 11:30:05 +00001768EXPORT_SYMBOL(sock_no_accept);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001770int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 int *len, int peer)
1772{
1773 return -EOPNOTSUPP;
1774}
Eric Dumazet2a915252009-05-27 11:30:05 +00001775EXPORT_SYMBOL(sock_no_getname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
Eric Dumazet2a915252009-05-27 11:30:05 +00001777unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778{
1779 return 0;
1780}
Eric Dumazet2a915252009-05-27 11:30:05 +00001781EXPORT_SYMBOL(sock_no_poll);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
1783int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1784{
1785 return -EOPNOTSUPP;
1786}
Eric Dumazet2a915252009-05-27 11:30:05 +00001787EXPORT_SYMBOL(sock_no_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789int sock_no_listen(struct socket *sock, int backlog)
1790{
1791 return -EOPNOTSUPP;
1792}
Eric Dumazet2a915252009-05-27 11:30:05 +00001793EXPORT_SYMBOL(sock_no_listen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
1795int sock_no_shutdown(struct socket *sock, int how)
1796{
1797 return -EOPNOTSUPP;
1798}
Eric Dumazet2a915252009-05-27 11:30:05 +00001799EXPORT_SYMBOL(sock_no_shutdown);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
1801int sock_no_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07001802 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803{
1804 return -EOPNOTSUPP;
1805}
Eric Dumazet2a915252009-05-27 11:30:05 +00001806EXPORT_SYMBOL(sock_no_setsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
1808int sock_no_getsockopt(struct socket *sock, int level, int optname,
1809 char __user *optval, int __user *optlen)
1810{
1811 return -EOPNOTSUPP;
1812}
Eric Dumazet2a915252009-05-27 11:30:05 +00001813EXPORT_SYMBOL(sock_no_getsockopt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
1815int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1816 size_t len)
1817{
1818 return -EOPNOTSUPP;
1819}
Eric Dumazet2a915252009-05-27 11:30:05 +00001820EXPORT_SYMBOL(sock_no_sendmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
1822int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1823 size_t len, int flags)
1824{
1825 return -EOPNOTSUPP;
1826}
Eric Dumazet2a915252009-05-27 11:30:05 +00001827EXPORT_SYMBOL(sock_no_recvmsg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
1829int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1830{
1831 /* Mirror missing mmap method error code */
1832 return -ENODEV;
1833}
Eric Dumazet2a915252009-05-27 11:30:05 +00001834EXPORT_SYMBOL(sock_no_mmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
1836ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1837{
1838 ssize_t res;
1839 struct msghdr msg = {.msg_flags = flags};
1840 struct kvec iov;
1841 char *kaddr = kmap(page);
1842 iov.iov_base = kaddr + offset;
1843 iov.iov_len = size;
1844 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1845 kunmap(page);
1846 return res;
1847}
Eric Dumazet2a915252009-05-27 11:30:05 +00001848EXPORT_SYMBOL(sock_no_sendpage);
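
/*
 * Illustrative sketch (hypothetical protocol, not part of this file): a
 * protocol that does not support an operation simply wires the matching
 * sock_no_*() stub into its proto_ops.  "PF_EXAMPLE" and "example_ops"
 * are assumed names for the example only:
 *
 *	static const struct proto_ops example_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.bind		= sock_no_bind,
 *		.accept		= sock_no_accept,
 *		.sendpage	= sock_no_sendpage,
 *		...
 *	};
 */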
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
1850/*
1851 * Default Socket Callbacks
1852 */
1853
1854static void sock_def_wakeup(struct sock *sk)
1855{
Eric Dumazet43815482010-04-29 11:01:49 +00001856 struct socket_wq *wq;
1857
1858 rcu_read_lock();
1859 wq = rcu_dereference(sk->sk_wq);
1860 if (wq_has_sleeper(wq))
1861 wake_up_interruptible_all(&wq->wait);
1862 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863}
1864
1865static void sock_def_error_report(struct sock *sk)
1866{
Eric Dumazet43815482010-04-29 11:01:49 +00001867 struct socket_wq *wq;
1868
1869 rcu_read_lock();
1870 wq = rcu_dereference(sk->sk_wq);
1871 if (wq_has_sleeper(wq))
1872 wake_up_interruptible_poll(&wq->wait, POLLERR);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001873 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
Eric Dumazet43815482010-04-29 11:01:49 +00001874 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875}
1876
1877static void sock_def_readable(struct sock *sk, int len)
1878{
Eric Dumazet43815482010-04-29 11:01:49 +00001879 struct socket_wq *wq;
1880
1881 rcu_read_lock();
1882 wq = rcu_dereference(sk->sk_wq);
1883 if (wq_has_sleeper(wq))
1884 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
Davide Libenzi37e55402009-03-31 15:24:21 -07001885 POLLRDNORM | POLLRDBAND);
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001886 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
Eric Dumazet43815482010-04-29 11:01:49 +00001887 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888}
1889
1890static void sock_def_write_space(struct sock *sk)
1891{
Eric Dumazet43815482010-04-29 11:01:49 +00001892 struct socket_wq *wq;
1893
1894 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
1896 /* Do not wake up a writer until he can make "significant"
1897 * progress. --DaveM
1898 */
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001899 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
Eric Dumazet43815482010-04-29 11:01:49 +00001900 wq = rcu_dereference(sk->sk_wq);
1901 if (wq_has_sleeper(wq))
1902 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
Davide Libenzi37e55402009-03-31 15:24:21 -07001903 POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
1905 /* Should agree with poll, otherwise some programs break */
1906 if (sock_writeable(sk))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001907 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 }
1909
Eric Dumazet43815482010-04-29 11:01:49 +00001910 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911}
1912
1913static void sock_def_destruct(struct sock *sk)
1914{
Jesper Juhla51482b2005-11-08 09:41:34 -08001915 kfree(sk->sk_protinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916}
1917
1918void sk_send_sigurg(struct sock *sk)
1919{
1920 if (sk->sk_socket && sk->sk_socket->file)
1921 if (send_sigurg(&sk->sk_socket->file->f_owner))
Pavel Emelyanov8d8ad9d2007-11-26 20:10:50 +08001922 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923}
Eric Dumazet2a915252009-05-27 11:30:05 +00001924EXPORT_SYMBOL(sk_send_sigurg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925
1926void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1927 unsigned long expires)
1928{
1929 if (!mod_timer(timer, expires))
1930 sock_hold(sk);
1931}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932EXPORT_SYMBOL(sk_reset_timer);
1933
1934void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1935{
1936 if (timer_pending(timer) && del_timer(timer))
1937 __sock_put(sk);
1938}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939EXPORT_SYMBOL(sk_stop_timer);
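
/*
 * Sketch of typical usage (not from this file): sk_reset_timer() takes a
 * reference on the socket when it arms the timer, and sk_stop_timer() drops
 * it again, so a protocol timer might be managed as follows
 * ("some_timeout" is an assumed placeholder):
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + some_timeout);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);
 */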
1940
1941void sock_init_data(struct socket *sock, struct sock *sk)
1942{
1943 skb_queue_head_init(&sk->sk_receive_queue);
1944 skb_queue_head_init(&sk->sk_write_queue);
1945 skb_queue_head_init(&sk->sk_error_queue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001946#ifdef CONFIG_NET_DMA
1947 skb_queue_head_init(&sk->sk_async_wait_queue);
1948#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950 sk->sk_send_head = NULL;
1951
1952 init_timer(&sk->sk_timer);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001953
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 sk->sk_allocation = GFP_KERNEL;
1955 sk->sk_rcvbuf = sysctl_rmem_default;
1956 sk->sk_sndbuf = sysctl_wmem_default;
1957 sk->sk_state = TCP_CLOSE;
David S. Miller972692e2008-06-17 22:41:38 -07001958 sk_set_socket(sk, sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960 sock_set_flag(sk, SOCK_ZAPPED);
1961
Stephen Hemmingere71a4782007-04-10 20:10:33 -07001962 if (sock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 sk->sk_type = sock->type;
Eric Dumazet43815482010-04-29 11:01:49 +00001964 sk->sk_wq = sock->wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 sock->sk = sk;
1966 } else
Eric Dumazet43815482010-04-29 11:01:49 +00001967 sk->sk_wq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
Eric Dumazetb6c67122010-04-08 23:03:29 +00001969 spin_lock_init(&sk->sk_dst_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 rwlock_init(&sk->sk_callback_lock);
Peter Zijlstra443aef02007-07-19 01:49:00 -07001971 lockdep_set_class_and_name(&sk->sk_callback_lock,
1972 af_callback_keys + sk->sk_family,
1973 af_family_clock_key_strings[sk->sk_family]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
1975 sk->sk_state_change = sock_def_wakeup;
1976 sk->sk_data_ready = sock_def_readable;
1977 sk->sk_write_space = sock_def_write_space;
1978 sk->sk_error_report = sock_def_error_report;
1979 sk->sk_destruct = sock_def_destruct;
1980
1981 sk->sk_sndmsg_page = NULL;
1982 sk->sk_sndmsg_off = 0;
1983
Eric W. Biederman109f6e32010-06-13 03:30:14 +00001984 sk->sk_peer_pid = NULL;
1985 sk->sk_peer_cred = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 sk->sk_write_pending = 0;
1987 sk->sk_rcvlowat = 1;
1988 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1989 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1990
Eric Dumazetf37f0af2008-04-13 21:39:26 -07001991 sk->sk_stamp = ktime_set(-1L, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Eric Dumazet4dc6dc72009-07-15 23:13:10 +00001993 /*
1994 * Before updating sk_refcnt, we must commit prior changes to memory
1995 * (Documentation/RCU/rculist_nulls.txt for details)
1996 */
1997 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 atomic_set(&sk->sk_refcnt, 1);
Wang Chen33c732c2007-11-13 20:30:01 -08001999 atomic_set(&sk->sk_drops, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000}
Eric Dumazet2a915252009-05-27 11:30:05 +00002001EXPORT_SYMBOL(sock_init_data);
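
/*
 * Illustrative sketch (hypothetical protocol, not part of this file): a
 * protocol family's ->create() handler allocates the sock and then lets
 * sock_init_data() fill in the generic defaults before overriding them.
 * "PF_EXAMPLE", "example_proto" and "example_destruct" are assumed names:
 *
 *	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	sk->sk_destruct = example_destruct;
 */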
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002003void lock_sock_nested(struct sock *sk, int subclass)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004{
2005 might_sleep();
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002006 spin_lock_bh(&sk->sk_lock.slock);
John Heffnerd2e91172007-09-12 10:44:19 +02002007 if (sk->sk_lock.owned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 __lock_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002009 sk->sk_lock.owned = 1;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002010 spin_unlock(&sk->sk_lock.slock);
2011 /*
2012 * The sk_lock has mutex_lock() semantics here:
2013 */
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002014 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002015 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016}
Peter Zijlstrafcc70d52006-11-08 22:44:35 -08002017EXPORT_SYMBOL(lock_sock_nested);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002019void release_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020{
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002021 /*
2022 * The sk_lock has mutex_unlock() semantics:
2023 */
2024 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2025
2026 spin_lock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 if (sk->sk_backlog.tail)
2028 __release_sock(sk);
John Heffnerd2e91172007-09-12 10:44:19 +02002029 sk->sk_lock.owned = 0;
Ingo Molnara5b5bb92006-07-03 00:25:35 -07002030 if (waitqueue_active(&sk->sk_lock.wq))
2031 wake_up(&sk->sk_lock.wq);
2032 spin_unlock_bh(&sk->sk_lock.slock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033}
2034EXPORT_SYMBOL(release_sock);
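
/*
 * Sketch of the usual locking pattern (not code from this file): process
 * context takes the socket lock with lock_sock(), which makes softirq input
 * back off to the backlog that __release_sock() later flushes:
 *
 *	lock_sock(sk);
 *	... modify socket state, walk queues ...
 *	release_sock(sk);
 */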
2035
Eric Dumazet8a74ad62010-05-26 19:20:18 +00002036/**
2037 * lock_sock_fast - fast version of lock_sock
2038 * @sk: socket
2039 *
2040 * This version should be used for very small sections, where the process won't block:
2041 * returns false if the fast path is taken,
2042 * sk_lock.slock locked, owned = 0, BH disabled
2043 * returns true if the slow path is taken,
2044 * sk_lock.slock unlocked, owned = 1, BH enabled
2045 */
2046bool lock_sock_fast(struct sock *sk)
2047{
2048 might_sleep();
2049 spin_lock_bh(&sk->sk_lock.slock);
2050
2051 if (!sk->sk_lock.owned)
2052 /*
2053 * Note : We must disable BH
2054 */
2055 return false;
2056
2057 __lock_sock(sk);
2058 sk->sk_lock.owned = 1;
2059 spin_unlock(&sk->sk_lock.slock);
2060 /*
2061 * The sk_lock has mutex_lock() semantics here:
2062 */
2063 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2064 local_bh_enable();
2065 return true;
2066}
2067EXPORT_SYMBOL(lock_sock_fast);
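
/*
 * Illustrative sketch (not part of this file): callers pair this with
 * unlock_sock_fast(), passing back the return value so the matching
 * unlock path is taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 */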
2068
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002070{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002071 struct timeval tv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002073 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002074 tv = ktime_to_timeval(sk->sk_stamp);
2075 if (tv.tv_sec == -1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 return -ENOENT;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002077 if (tv.tv_sec == 0) {
2078 sk->sk_stamp = ktime_get_real();
2079 tv = ktime_to_timeval(sk->sk_stamp);
2080 }
2081 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002082}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083EXPORT_SYMBOL(sock_get_timestamp);
2084
Eric Dumazetae40eb12007-03-18 17:33:16 -07002085int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2086{
2087 struct timespec ts;
2088 if (!sock_flag(sk, SOCK_TIMESTAMP))
Patrick Ohly20d49472009-02-12 05:03:38 +00002089 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
Eric Dumazetae40eb12007-03-18 17:33:16 -07002090 ts = ktime_to_timespec(sk->sk_stamp);
2091 if (ts.tv_sec == -1)
2092 return -ENOENT;
2093 if (ts.tv_sec == 0) {
2094 sk->sk_stamp = ktime_get_real();
2095 ts = ktime_to_timespec(sk->sk_stamp);
2096 }
2097 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2098}
2099EXPORT_SYMBOL(sock_get_timestampns);
2100
Patrick Ohly20d49472009-02-12 05:03:38 +00002101void sock_enable_timestamp(struct sock *sk, int flag)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002102{
Patrick Ohly20d49472009-02-12 05:03:38 +00002103 if (!sock_flag(sk, flag)) {
2104 sock_set_flag(sk, flag);
2105 /*
2106 * we just set one of the two flags which require net
2107 * time stamping, but time stamping might have been on
2108 * already because of the other one
2109 */
2110 if (!sock_flag(sk,
2111 flag == SOCK_TIMESTAMP ?
2112 SOCK_TIMESTAMPING_RX_SOFTWARE :
2113 SOCK_TIMESTAMP))
2114 net_enable_timestamp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 }
2116}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
2118/*
2119 * Get a socket option on a socket.
2120 *
2121 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2122 * asynchronous errors should be reported by getsockopt. We assume
2123 * this means if you specify SO_ERROR (otherwise what's the point of it).
2124 */
2125int sock_common_getsockopt(struct socket *sock, int level, int optname,
2126 char __user *optval, int __user *optlen)
2127{
2128 struct sock *sk = sock->sk;
2129
2130 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2131}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132EXPORT_SYMBOL(sock_common_getsockopt);
2133
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002134#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002135int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2136 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002137{
2138 struct sock *sk = sock->sk;
2139
Johannes Berg1e51f952007-03-06 13:44:06 -08002140 if (sk->sk_prot->compat_getsockopt != NULL)
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002141 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2142 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002143 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2144}
2145EXPORT_SYMBOL(compat_sock_common_getsockopt);
2146#endif
2147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2149 struct msghdr *msg, size_t size, int flags)
2150{
2151 struct sock *sk = sock->sk;
2152 int addr_len = 0;
2153 int err;
2154
2155 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2156 flags & ~MSG_DONTWAIT, &addr_len);
2157 if (err >= 0)
2158 msg->msg_namelen = addr_len;
2159 return err;
2160}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161EXPORT_SYMBOL(sock_common_recvmsg);
2162
2163/*
2164 * Set socket options on an inet socket.
2165 */
2166int sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002167 char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168{
2169 struct sock *sk = sock->sk;
2170
2171 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2172}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173EXPORT_SYMBOL(sock_common_setsockopt);
2174
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002175#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002176int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
David S. Millerb7058842009-09-30 16:12:20 -07002177 char __user *optval, unsigned int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002178{
2179 struct sock *sk = sock->sk;
2180
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002181 if (sk->sk_prot->compat_setsockopt != NULL)
2182 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2183 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002184 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2185}
2186EXPORT_SYMBOL(compat_sock_common_setsockopt);
2187#endif
2188
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189void sk_common_release(struct sock *sk)
2190{
2191 if (sk->sk_prot->destroy)
2192 sk->sk_prot->destroy(sk);
2193
2194 /*
2195	 * Observation: when sk_common_release is called, processes have
2196	 * no access to the socket, but the network stack still does.
2197 * Step one, detach it from networking:
2198 *
2199 * A. Remove from hash tables.
2200 */
2201
2202 sk->sk_prot->unhash(sk);
2203
2204 /*
2205	 * At this point the socket cannot receive new packets, but it is possible
2206	 * that some packets are still in flight because some CPU is running the
2207	 * receiver and did the hash table lookup before we unhashed the socket.
2208	 * They will reach the receive queue and be purged by the socket destructor.
2209 *
2210	 * Also we still have packets pending on the receive queue and, probably,
2211	 * our own packets waiting in device queues. sock_destroy will drain the
2212	 * receive queue, but transmitted packets will delay socket destruction
2213	 * until the last reference is released.
2214 */
2215
2216 sock_orphan(sk);
2217
2218 xfrm_sk_free_policy(sk);
2219
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07002220 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 sock_put(sk);
2222}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223EXPORT_SYMBOL(sk_common_release);
2224
2225static DEFINE_RWLOCK(proto_list_lock);
2226static LIST_HEAD(proto_list);
2227
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002228#ifdef CONFIG_PROC_FS
2229#define PROTO_INUSE_NR 64 /* should be enough for the first time */
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002230struct prot_inuse {
2231 int val[PROTO_INUSE_NR];
2232};
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002233
2234static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002235
2236#ifdef CONFIG_NET_NS
2237void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2238{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002239 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002240}
2241EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2242
2243int sock_prot_inuse_get(struct net *net, struct proto *prot)
2244{
2245 int cpu, idx = prot->inuse_idx;
2246 int res = 0;
2247
2248 for_each_possible_cpu(cpu)
2249 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2250
2251 return res >= 0 ? res : 0;
2252}
2253EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2254
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002255static int __net_init sock_inuse_init_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002256{
2257 net->core.inuse = alloc_percpu(struct prot_inuse);
2258 return net->core.inuse ? 0 : -ENOMEM;
2259}
2260
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002261static void __net_exit sock_inuse_exit_net(struct net *net)
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002262{
2263 free_percpu(net->core.inuse);
2264}
2265
2266static struct pernet_operations net_inuse_ops = {
2267 .init = sock_inuse_init_net,
2268 .exit = sock_inuse_exit_net,
2269};
2270
2271static __init int net_inuse_init(void)
2272{
2273 if (register_pernet_subsys(&net_inuse_ops))
2274 panic("Cannot initialize net inuse counters");
2275
2276 return 0;
2277}
2278
2279core_initcall(net_inuse_init);
2280#else
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002281static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2282
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002283void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002284{
Eric Dumazetd6d9ca02010-07-19 10:48:49 +00002285 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002286}
2287EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2288
Pavel Emelyanovc29a0bc2008-03-31 19:41:46 -07002289int sock_prot_inuse_get(struct net *net, struct proto *prot)
Pavel Emelyanov1338d462008-03-28 16:38:43 -07002290{
2291 int cpu, idx = prot->inuse_idx;
2292 int res = 0;
2293
2294 for_each_possible_cpu(cpu)
2295 res += per_cpu(prot_inuse, cpu).val[idx];
2296
2297 return res >= 0 ? res : 0;
2298}
2299EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
Pavel Emelyanov70ee1152008-03-31 19:42:16 -07002300#endif
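
/*
 * Illustrative sketch (not from this file): protocols bump this counter
 * from their hash/unhash callbacks so /proc/net/protocols can report the
 * per-protocol socket count:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	(on hash)
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	(on unhash)
 */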
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002301
2302static void assign_proto_idx(struct proto *prot)
2303{
2304 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2305
2306 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2307 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2308 return;
2309 }
2310
2311 set_bit(prot->inuse_idx, proto_inuse_idx);
2312}
2313
2314static void release_proto_idx(struct proto *prot)
2315{
2316 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2317 clear_bit(prot->inuse_idx, proto_inuse_idx);
2318}
2319#else
2320static inline void assign_proto_idx(struct proto *prot)
2321{
2322}
2323
2324static inline void release_proto_idx(struct proto *prot)
2325{
2326}
2327#endif
2328
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329int proto_register(struct proto *prot, int alloc_slab)
2330{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 if (alloc_slab) {
2332 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
Eric Dumazet271b72c2008-10-29 02:11:14 -07002333 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2334 NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335
2336 if (prot->slab == NULL) {
2337 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2338 prot->name);
Pavel Emelyanov60e76632008-03-28 16:39:10 -07002339 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002341
2342 if (prot->rsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002343 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002344 if (prot->rsk_prot->slab_name == NULL)
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002345 goto out_free_sock_slab;
2346
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002347 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002348 prot->rsk_prot->obj_size, 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002349 SLAB_HWCACHE_ALIGN, NULL);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002350
2351 if (prot->rsk_prot->slab == NULL) {
2352 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2353 prot->name);
2354 goto out_free_request_sock_slab_name;
2355 }
2356 }
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002357
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002358 if (prot->twsk_prot != NULL) {
Alexey Dobriyanfaf23422010-02-17 09:34:12 +00002359 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002360
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002361 if (prot->twsk_prot->twsk_slab_name == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002362 goto out_free_request_sock_slab;
2363
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002364 prot->twsk_prot->twsk_slab =
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002365 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002366 prot->twsk_prot->twsk_obj_size,
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002367 0,
2368 SLAB_HWCACHE_ALIGN |
2369 prot->slab_flags,
Paul Mundt20c2df82007-07-20 10:11:58 +09002370 NULL);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002371 if (prot->twsk_prot->twsk_slab == NULL)
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002372 goto out_free_timewait_sock_slab_name;
2373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 }
2375
Arnaldo Carvalho de Melo2a278052005-04-16 15:24:09 -07002376 write_lock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 list_add(&prot->node, &proto_list);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002378 assign_proto_idx(prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 write_unlock(&proto_list_lock);
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002380 return 0;
2381
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002382out_free_timewait_sock_slab_name:
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002383 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002384out_free_request_sock_slab:
2385 if (prot->rsk_prot && prot->rsk_prot->slab) {
2386 kmem_cache_destroy(prot->rsk_prot->slab);
2387 prot->rsk_prot->slab = NULL;
2388 }
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002389out_free_request_sock_slab_name:
Dan Carpenter72150e92010-03-06 01:04:45 +00002390 if (prot->rsk_prot)
2391 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002392out_free_sock_slab:
2393 kmem_cache_destroy(prot->slab);
2394 prot->slab = NULL;
Pavel Emelyanovb733c002007-11-07 02:23:38 -08002395out:
2396 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398EXPORT_SYMBOL(proto_register);
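
/*
 * Illustrative sketch (hypothetical protocol, not part of this file): a
 * protocol module registers its struct proto once at init time and
 * unregisters it on exit; obj_size sizes the per-socket slab objects.
 * "example_proto" and "struct example_sock" are assumed names:
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *	};
 *
 *	err = proto_register(&example_proto, 1);
 *	...
 *	proto_unregister(&example_proto);
 */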
2399
2400void proto_unregister(struct proto *prot)
2401{
2402 write_lock(&proto_list_lock);
Pavel Emelyanov13ff3d62008-03-28 16:38:17 -07002403 release_proto_idx(prot);
Patrick McHardy0a3f4352005-09-06 19:47:50 -07002404 list_del(&prot->node);
2405 write_unlock(&proto_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406
2407 if (prot->slab != NULL) {
2408 kmem_cache_destroy(prot->slab);
2409 prot->slab = NULL;
2410 }
2411
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002412 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002413 kmem_cache_destroy(prot->rsk_prot->slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002414 kfree(prot->rsk_prot->slab_name);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07002415 prot->rsk_prot->slab = NULL;
2416 }
2417
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002418 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002419 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
Catalin Marinas7e56b5d2008-11-21 16:45:22 -08002420 kfree(prot->twsk_prot->twsk_slab_name);
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002421 prot->twsk_prot->twsk_slab = NULL;
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002422 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424EXPORT_SYMBOL(proto_unregister);
2425
2426#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002428 __acquires(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429{
2430 read_lock(&proto_list_lock);
Pavel Emelianov60f04382007-07-09 13:15:14 -07002431 return seq_list_start_head(&proto_list, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432}
2433
2434static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2435{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002436 return seq_list_next(v, &proto_list, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437}
2438
2439static void proto_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002440 __releases(proto_list_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441{
2442 read_unlock(&proto_list_lock);
2443}
2444
2445static char proto_method_implemented(const void *method)
2446{
2447 return method == NULL ? 'n' : 'y';
2448}
2449
2450static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2451{
2452 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
2453 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2454 proto->name,
2455 proto->obj_size,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002456 sock_prot_inuse_get(seq_file_net(seq), proto),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
2458 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2459 proto->max_header,
2460 proto->slab == NULL ? "no" : "yes",
2461 module_name(proto->owner),
2462 proto_method_implemented(proto->close),
2463 proto_method_implemented(proto->connect),
2464 proto_method_implemented(proto->disconnect),
2465 proto_method_implemented(proto->accept),
2466 proto_method_implemented(proto->ioctl),
2467 proto_method_implemented(proto->init),
2468 proto_method_implemented(proto->destroy),
2469 proto_method_implemented(proto->shutdown),
2470 proto_method_implemented(proto->setsockopt),
2471 proto_method_implemented(proto->getsockopt),
2472 proto_method_implemented(proto->sendmsg),
2473 proto_method_implemented(proto->recvmsg),
2474 proto_method_implemented(proto->sendpage),
2475 proto_method_implemented(proto->bind),
2476 proto_method_implemented(proto->backlog_rcv),
2477 proto_method_implemented(proto->hash),
2478 proto_method_implemented(proto->unhash),
2479 proto_method_implemented(proto->get_port),
2480 proto_method_implemented(proto->enter_memory_pressure));
2481}
2482
2483static int proto_seq_show(struct seq_file *seq, void *v)
2484{
Pavel Emelianov60f04382007-07-09 13:15:14 -07002485 if (v == &proto_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2487 "protocol",
2488 "size",
2489 "sockets",
2490 "memory",
2491 "press",
2492 "maxhdr",
2493 "slab",
2494 "module",
2495 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2496 else
Pavel Emelianov60f04382007-07-09 13:15:14 -07002497 proto_seq_printf(seq, list_entry(v, struct proto, node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 return 0;
2499}
2500
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002501static const struct seq_operations proto_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 .start = proto_seq_start,
2503 .next = proto_seq_next,
2504 .stop = proto_seq_stop,
2505 .show = proto_seq_show,
2506};
2507
2508static int proto_seq_open(struct inode *inode, struct file *file)
2509{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002510 return seq_open_net(inode, file, &proto_seq_ops,
2511 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512}
2513
Arjan van de Ven9a321442007-02-12 00:55:35 -08002514static const struct file_operations proto_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 .owner = THIS_MODULE,
2516 .open = proto_seq_open,
2517 .read = seq_read,
2518 .llseek = seq_lseek,
Eric Dumazet14e943d2008-11-19 15:14:01 -08002519 .release = seq_release_net,
2520};
2521
2522static __net_init int proto_init_net(struct net *net)
2523{
2524 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2525 return -ENOMEM;
2526
2527 return 0;
2528}
2529
2530static __net_exit void proto_exit_net(struct net *net)
2531{
2532 proc_net_remove(net, "protocols");
2533}
2534
2535
2536static __net_initdata struct pernet_operations proto_net_ops = {
2537 .init = proto_init_net,
2538 .exit = proto_exit_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539};
2540
2541static int __init proto_init(void)
2542{
Eric Dumazet14e943d2008-11-19 15:14:01 -08002543 return register_pernet_subsys(&proto_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544}
2545
2546subsys_initcall(proto_init);
2547
2548#endif /* PROC_FS */