/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif
131
Ingo Molnarda21f242006-07-03 00:25:12 -0700132/*
133 * Each address family might have different locking rules, so we have
134 * one slock key per address family:
135 */
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700136static struct lock_class_key af_family_keys[AF_MAX];
137static struct lock_class_key af_family_slock_keys[AF_MAX];
138
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700139/*
140 * Make lock validator output more readable. (we pre-construct these
141 * strings build-time, so that runtime initialization of socket
142 * locks is fast):
143 */
144static const char *af_family_key_strings[AF_MAX+1] = {
145 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
146 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
147 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
148 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
149 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
150 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
151 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
152 "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
153 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800154 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700155 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700156 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
157 "sk_lock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700158};
159static const char *af_family_slock_key_strings[AF_MAX+1] = {
160 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
161 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
162 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
163 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
164 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
165 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
166 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
167 "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" ,
168 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
Oliver Hartkoppcd05acf2007-12-16 15:59:24 -0800169 "slock-27" , "slock-28" , "slock-AF_CAN" ,
David Howells17926a72007-04-26 15:48:28 -0700170 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700171 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
172 "slock-AF_MAX"
Ingo Molnara5b5bb92006-07-03 00:25:35 -0700173};
Peter Zijlstra443aef02007-07-19 01:49:00 -0700174static const char *af_family_clock_key_strings[AF_MAX+1] = {
175 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
176 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
177 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
178 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
179 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
180 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
181 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
182 "clock-21" , "clock-AF_SNA" , "clock-AF_IRDA" ,
183 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
Oliver Hartkoppb4942af2008-07-23 14:06:04 -0700184 "clock-27" , "clock-28" , "clock-AF_CAN" ,
David Howellse51f8022007-07-21 19:30:16 -0700185 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
Remi Denis-Courmontbce7b152008-09-22 19:51:15 -0700186 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
187 "clock-AF_MAX"
Peter Zijlstra443aef02007-07-19 01:49:00 -0700188};
Ingo Molnarda21f242006-07-03 00:25:12 -0700189
190/*
191 * sk_callback_lock locking rules are per-address-family,
192 * so split the lock classes by using a per-AF key:
193 */
194static struct lock_class_key af_callback_keys[AF_MAX];
195
/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
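
/*
 * Worked example (illustrative only, HZ value assumed): the conversion
 * above rounds the microsecond part up to whole jiffies.  With HZ = 100,
 * a user timeout of { .tv_sec = 2, .tv_usec = 500000 } yields
 *
 *	*timeo_p = 2 * 100 + (500000 + (1000000/100 - 1)) / (1000000/100)
 *	         = 200 + 50 = 250 jiffies
 *
 * so even a 1 usec request still sleeps for at least one full jiffy.
 */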

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk)
{
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		err = -ENOBUFS;
		goto out;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
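
/*
 * Usage sketch (hypothetical, not part of this file): a protocol's input
 * path typically hands a fully built skb to the socket layer like this,
 * freeing the skb itself when queueing fails:
 *
 *	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sock_queue_rcv_skb(sk, skb);
 *		if (err < 0)		// -ENOMEM, -ENOBUFS or filter drop
 *			kfree_skb(skb);
 *		return err;
 *	}
 *
 * "my_proto_rcv" is a placeholder name for illustration.
 */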

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	if (devname[0] == '\0') {
		index = 0;
	} else {
		struct net_device *dev = dev_get_by_name(net, devname);

		ret = -ENODEV;
		if (!dev)
			goto out;

		index = dev->ifindex;
		dev_put(dev);
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
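
/*
 * Userspace sketch (illustrative only, requires CAP_NET_RAW as enforced
 * above): binding a socket to one interface is a plain setsockopt() call
 * carrying the interface name:
 *
 *	const char ifname[] = "eth0";
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, sizeof(ifname)) < 0)
 *		perror("SO_BINDTODEVICE");
 *
 * Passing an empty name ("") removes the binding again.
 */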

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN)) {
			ret = -EACCES;
		} else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.  Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			sk->sk_mark = val;
		}
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
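
/*
 * Userspace sketch (illustrative only): SO_RCVBUF is doubled on the way
 * in to cover struct sk_buff overhead, and the doubled value is what
 * getsockopt() later reports:
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *	// val is now 131072, assuming net.core.rmem_max allowed 65536
 */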


int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	v.val = 0;

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
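
/*
 * Illustrative note: SO_ERROR is read-and-clear, since sock_error() above
 * xchg()s sk_err with 0.  A hypothetical userspace caller checking an
 * asynchronous connect() would do:
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *	// err == 0 means success; reading again returns 0 because the
 *	// pending error has been consumed.
 */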

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif

	memcpy(nsk, osk, osk->sk_prot->obj_size);
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL)
		sk = kmem_cache_alloc(slab, priority);
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}
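
/*
 * Usage sketch (hypothetical, illustration only): protocols reach this
 * allocator through sk_alloc() below with their own struct proto, e.g.
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *
 * With .slab left NULL the kmalloc() path above is taken; protocols that
 * call proto_register() with alloc_slab == 1 get a dedicated kmem_cache
 * instead.  "my_proto" and "struct my_sock" are placeholder names.
 */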

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
	}

	return sk;
}

void sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to the stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and make a plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_sleep	 = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}

EXPORT_SYMBOL_GPL(sk_clone);
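
/*
 * Illustrative note on the refcount of 2 set above: one common reading is
 * that one reference belongs to the protocol hash table the caller will
 * insert newsk into, and the other to the caller itself (e.g. the parent
 * socket's accept path in TCP).  Note also that newsk is returned with
 * its spinlock held (bh_lock_sock() above); the caller is expected to
 * unlock it once initialisation is complete.
 */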

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
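
/*
 * Worked example (comment only, device features assumed): for a NIC that
 * advertises TSO, sk_can_gso() succeeds for a TCP socket and, when the
 * route adds no extra header (dst->header_len == 0, i.e. no IPsec-style
 * encapsulation), the socket also gains NETIF_F_SG | NETIF_F_HW_CSUM,
 * because software GSO can segment and checksum on the device's behalf.
 * An encapsulating route with dst->header_len != 0 instead clears the
 * GSO bits entirely.
 */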

void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}
/*
 * Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	skb_truesize_check(skb);
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_uncharge(skb->sk, skb->truesize);
}


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
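
/*
 * Usage sketch (hypothetical, illustration only): option memory is
 * charged to sk_omem_alloc and must be released with the same size:
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (opt == NULL)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 *
 * "struct my_opt" is a placeholder; per-socket option blobs in several
 * in-tree protocols use this pair.
 */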

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}

EXPORT_SYMBOL(sk_wait_data);
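
/*
 * Usage sketch (hypothetical, illustration only): a blocking recvmsg
 * implementation typically loops on sk_wait_data() until data arrives or
 * the SO_RCVTIMEO-derived timeout expires:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */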

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	int allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_sub(amt, prot->memory_allocated);
	return 0;
}

EXPORT_SYMBOL(__sk_mem_schedule);
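
/*
 * Worked example (comment only, SK_MEM_QUANTUM assumed to be PAGE_SIZE,
 * here 4096): charging size = 6000 bytes gives
 *
 *	amt = sk_mem_pages(6000) = 2
 *	sk_forward_alloc += 2 * 4096 = 8192 bytes
 *	memory_allocated += 2
 *
 * leaving 2192 spare bytes of forward_alloc from which later small
 * charges can be served without touching the global counter again.
 */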

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
		   prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}

EXPORT_SYMBOL(__sk_mem_reclaim);
1518
1519
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
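
/*
 * A protocol that does not support one of these operations can point
 * the corresponding proto_ops slot at a stub instead of supplying its
 * own "do nothing" function.  A minimal sketch with a hypothetical
 * "example" family (PF_EXAMPLE and example_ops are illustrative only,
 * not an existing protocol):
 *
 *	static const struct proto_ops example_ops = {
 *		.family		= PF_EXAMPLE,
 *		.owner		= THIS_MODULE,
 *		.bind		= sock_no_bind,
 *		.accept		= sock_no_accept,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *		...		(real operations fill the remaining slots)
 *	};
 */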

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_sync(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

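/*
 * sock_def_write_space() wakes writers only once at least half of the
 * send buffer is free again: (wmem_alloc << 1) <= sndbuf is simply
 * wmem_alloc <= sndbuf / 2 computed without a division.  For example,
 * with a 64 KB sndbuf, a writer that blocked on a full buffer is not
 * woken until outstanding write allocations drop to 32 KB or below.
 */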
static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible_sync(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}

EXPORT_SYMBOL(sk_stop_timer);
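
/*
 * Note the reference pairing above: sk_reset_timer() takes a socket
 * reference only when it actually arms an inactive timer (mod_timer()
 * returned 0), and sk_stop_timer() drops one only when it actually
 * deactivates a pending timer, so holds and puts always balance.  A
 * timer handler that does run is expected to release its reference
 * itself, typically via sock_put().
 */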

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_sleep = &sock->wait;
		sock->sk = sk;
	} else
		sk->sk_sleep = NULL;

	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;

	sk->sk_peercred.pid = 0;
	sk->sk_peercred.uid = -1;
	sk->sk_peercred.gid = -1;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}

EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
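
/*
 * Typical usage from process context (a sketch; the real call sites
 * live in the individual protocols):
 *
 *	lock_sock(sk);
 *	... modify socket state, safe against the softirq receive path ...
 *	release_sock(sk);
 *
 * release_sock() also processes any packets that were queued on the
 * backlog while the lock was owned.
 */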

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);
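
/*
 * sk_stamp carries two sentinel values: sock_init_data() initialises
 * it to -1 seconds, meaning "no packet has ever been timestamped on
 * this socket", which the getters above report as -ENOENT; a stamp of
 * exactly 0 is likewise not treated as a real packet timestamp and is
 * replaced with the current wall-clock time before being copied out.
 */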

void sock_enable_timestamp(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
	}
}

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}

EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release() is called, processes have
	 * no access to the socket, but the network still does.
	 * Step one, detach it from networking:
	 *
	 * A. Remove it from the hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are still in flight because another
	 * CPU ran the receiver and did the hash table lookup before we
	 * unhashed the socket.  They will reach the receive queue and be
	 * purged by the socket destructor.
	 *
	 * We also still have packets pending on the receive queue, and
	 * probably our own packets waiting in device queues.  sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}

EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	int cpu = smp_processor_id();
	per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif
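
/*
 * Note that the totals above are summed locklessly from per-CPU
 * counters that may be incremented on one CPU and decremented on
 * another, so a transiently negative sum is possible; the
 * "res >= 0 ? res : 0" clamp keeps that from leaking to readers.
 */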

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(prot->rsk_prot->slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			static const char mask[] = "tw_sock_%s";

			prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}

EXPORT_SYMBOL(proto_register);
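
/*
 * A protocol module typically registers its struct proto once at init
 * time.  A minimal sketch with hypothetical names (a real module would
 * also register a net_proto_family with sock_register(), and undo both
 * on exit with proto_unregister() and sock_unregister()):
 *
 *	static struct proto example_prot = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_prot, 1);
 *	}
 */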

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}

EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_lock)
{
	read_lock(&proto_list_lock);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_lock)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);