/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko EiBfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *		Christoph Rohland :	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *		Alexey Kuznetsov :	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli :	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skb queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *		Alexey Kuznetsov :	Full scale SMP. Lot of bugs are introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
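
/*
 * For illustration, a userspace bind() to an abstract address might look
 * like the sketch below (sun_path[0] stays 0, the name is the following
 * bytes and is not NUL terminated), whereas a filesystem binding passes a
 * NUL-terminated path in sun_path:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	memcpy(a.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */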

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;

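
/* Unbound sockets live in the second half of unix_socket_table, hashed by
 * the socket pointer itself; bound sockets use the first UNIX_HASH_SIZE
 * buckets, hashed by address (see unix_hash_fold() below).
 */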
static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

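
/* Fold a checksum of the address down to a table index: csum_fold()
 * reduces it to 16 bits, the extra xor-shift mixes the high byte in
 * before masking to the table size.
 */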
static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)csum_fold(n);

	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- it should not be zero length.
 *		- if it does not start with a zero byte, it should be
 *		  NUL terminated (an FS object).
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

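
/* A socket counts as writable while less than a quarter of its send buffer
 * is consumed by queued skbs (wmem_alloc << 2 <= sndbuf).
 */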
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 * What the above comment does talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
				    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->readlock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->readlock);

	return 0;
}


static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * dont trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

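
/* Autobind an unbound socket to a kernel-chosen abstract name of the form
 * "\0XXXXX" (five hex digits taken from a global counter), retrying until
 * an unused name is found.
 */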
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take long time if many names
		 * are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = d_backing_inode(path.dentry);
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		goto out;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		addr->hash = UNIX_HASH_SIZE;
		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}

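
/* Lock the state of two sockets in a fixed order (ascending pointer value)
 * so that concurrent callers locking the same pair cannot deadlock; a NULL
 * or identical second socket degenerates to a single lock.
 */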
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

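
/* Called with the peer's state lock held (released here): if the peer is
 * still alive, not shut down for reading and its receive queue is over the
 * backlog limit, sleep on its peer_wait queue until woken or the timeout
 * expires.
 */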
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL, 0);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because deadlock is
	   possible. The connect-to-self case and a simultaneous
	   attempt to connect are eliminated by checking the socket
	   state: other is TCP_LISTEN, and if sk were TCP_LISTEN we
	   would have checked this before attempting to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock*/
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}

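
/* accept() propagates the SOCK_PASSCRED/SOCK_PASSSEC options from the
 * listening socket to the newly accepted one.
 */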
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001269static void unix_sock_inherit_flags(const struct socket *old,
1270 struct socket *new)
1271{
1272 if (test_bit(SOCK_PASSCRED, &old->flags))
1273 set_bit(SOCK_PASSCRED, &new->flags);
1274 if (test_bit(SOCK_PASSSEC, &old->flags))
1275 set_bit(SOCK_PASSSEC, &new->flags);
1276}
1277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1279{
1280 struct sock *sk = sock->sk;
1281 struct sock *tsk;
1282 struct sk_buff *skb;
1283 int err;
1284
1285 err = -EOPNOTSUPP;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001286 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 goto out;
1288
1289 err = -EINVAL;
1290 if (sk->sk_state != TCP_LISTEN)
1291 goto out;
1292
1293 /* If socket state is TCP_LISTEN it cannot change (for now...),
1294 * so that no locks are necessary.
1295 */
1296
1297 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1298 if (!skb) {
1299 /* This means receive shutdown. */
1300 if (err == 0)
1301 err = -EINVAL;
1302 goto out;
1303 }
1304
1305 tsk = skb->sk;
1306 skb_free_datagram(sk, skb);
1307 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1308
1309 /* attach accepted sock to socket */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001310 unix_state_lock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 newsock->state = SS_CONNECTED;
Daniel Borkmann90c6bd32013-10-17 22:51:31 +02001312 unix_sock_inherit_flags(sock, newsock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 sock_graft(tsk, newsock);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001314 unix_state_unlock(tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 return 0;
1316
1317out:
1318 return err;
1319}
1320
1321
1322static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1323{
1324 struct sock *sk = sock->sk;
1325 struct unix_sock *u;
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00001326 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 int err = 0;
1328
1329 if (peer) {
1330 sk = unix_peer_get(sk);
1331
1332 err = -ENOTCONN;
1333 if (!sk)
1334 goto out;
1335 err = 0;
1336 } else {
1337 sock_hold(sk);
1338 }
1339
1340 u = unix_sk(sk);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001341 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 if (!u->addr) {
1343 sunaddr->sun_family = AF_UNIX;
1344 sunaddr->sun_path[0] = 0;
1345 *uaddr_len = sizeof(short);
1346 } else {
1347 struct unix_address *addr = u->addr;
1348
1349 *uaddr_len = addr->len;
1350 memcpy(sunaddr, addr->name, *uaddr_len);
1351 }
David S. Miller1c92b4e2007-05-31 13:24:26 -07001352 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353 sock_put(sk);
1354out:
1355 return err;
1356}
1357
1358static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1359{
1360 int i;
1361
1362 scm->fp = UNIXCB(skb).fp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 UNIXCB(skb).fp = NULL;
1364
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001365 for (i = scm->fp->count-1; i >= 0; i--)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 unix_notinflight(scm->fp->fp[i]);
1367}
1368
Eric W. Biederman7361c362010-06-13 03:34:33 +00001369static void unix_destruct_scm(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370{
1371 struct scm_cookie scm;
1372 memset(&scm, 0, sizeof(scm));
Eric W. Biederman7361c362010-06-13 03:34:33 +00001373 scm.pid = UNIXCB(skb).pid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001374 if (UNIXCB(skb).fp)
1375 unix_detach_fds(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
1377 /* Alas, it calls VFS */
 1378 /* So fscking what? fput() has been SMP-safe since last summer */
1379 scm_destroy(&scm);
1380 sock_wfree(skb);
1381}
1382
Eric Dumazet25888e32010-11-25 04:11:39 +00001383#define MAX_RECURSION_LEVEL 4
1384
Miklos Szeredi62093442008-11-09 15:23:57 +01001385static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386{
1387 int i;
Eric Dumazet25888e32010-11-25 04:11:39 +00001388 unsigned char max_level = 0;
1389 int unix_sock_count = 0;
1390
1391 for (i = scm->fp->count - 1; i >= 0; i--) {
1392 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1393
1394 if (sk) {
1395 unix_sock_count++;
1396 max_level = max(max_level,
1397 unix_sk(sk)->recursion_level);
1398 }
1399 }
1400 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1401 return -ETOOMANYREFS;
Miklos Szeredi62093442008-11-09 15:23:57 +01001402
1403 /*
1404 * Need to duplicate file references for the sake of garbage
1405 * collection. Otherwise a socket in the fps might become a
1406 * candidate for GC while the skb is not yet queued.
1407 */
1408 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1409 if (!UNIXCB(skb).fp)
1410 return -ENOMEM;
1411
Eric Dumazet25888e32010-11-25 04:11:39 +00001412 if (unix_sock_count) {
1413 for (i = scm->fp->count - 1; i >= 0; i--)
1414 unix_inflight(scm->fp->fp[i]);
1415 }
1416 return max_level;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417}
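/*
 * Illustrative userspace sketch (not kernel code; "sock" and "fd" are
 * hypothetical, assumed-valid descriptors): an SCM_RIGHTS message like
 * the one below is what reaches unix_attach_fds() on the send path:
 *
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *	sendmsg(sock, &msg, 0);
 *
 * The duplicated references taken above keep the passed descriptor's
 * socket (if it is one) from becoming a garbage-collection candidate
 * before the skb is actually queued.
 */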
1418
David S. Millerf78a5fd2011-09-16 19:34:00 -04001419static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001420{
1421 int err = 0;
Eric Dumazet16e57262011-09-19 05:52:27 +00001422
David S. Millerf78a5fd2011-09-16 19:34:00 -04001423 UNIXCB(skb).pid = get_pid(scm->pid);
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001424 UNIXCB(skb).uid = scm->creds.uid;
1425 UNIXCB(skb).gid = scm->creds.gid;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001426 UNIXCB(skb).fp = NULL;
Stephen Smalley37a9a8d2015-06-10 08:44:59 -04001427 unix_get_secdata(scm, skb);
Eric W. Biederman7361c362010-06-13 03:34:33 +00001428 if (scm->fp && send_fds)
1429 err = unix_attach_fds(scm, skb);
1430
1431 skb->destructor = unix_destruct_scm;
1432 return err;
1433}
1434
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435/*
Eric Dumazet16e57262011-09-19 05:52:27 +00001436 * Some apps rely on write() giving SCM_CREDENTIALS.
 1437 * We include credentials if the source or destination socket
1438 * asserted SOCK_PASSCRED.
1439 */
1440static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1441 const struct sock *other)
1442{
Eric W. Biederman6b0ee8c02013-04-03 17:28:16 +00001443 if (UNIXCB(skb).pid)
Eric Dumazet16e57262011-09-19 05:52:27 +00001444 return;
1445 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
Eric W. Biederman25da0e32013-04-03 16:13:35 +00001446 !other->sk_socket ||
1447 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
Eric Dumazet16e57262011-09-19 05:52:27 +00001448 UNIXCB(skb).pid = get_pid(task_tgid(current));
David S. Miller6e0895c2013-04-22 20:32:51 -04001449 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
Eric Dumazet16e57262011-09-19 05:52:27 +00001450 }
1451}
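/*
 * Illustrative userspace sketch (not kernel code; "sock", "msg" and "uc"
 * are hypothetical names assumed to be set up by the caller): a receiver
 * that wants the credentials maybe_add_creds() attaches enables
 * SO_PASSCRED and walks the control messages of each recvmsg():
 *
 *	int on = 1;
 *	struct ucred *uc = NULL;
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(sock, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	recvmsg(sock, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_SOCKET &&
 *		    cmsg->cmsg_type == SCM_CREDENTIALS)
 *			uc = (struct ucred *)CMSG_DATA(cmsg);
 *
 * uc then holds the sender's pid, uid and gid.
 */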
1452
1453/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 * Send AF_UNIX data.
1455 */
1456
Ying Xue1b784142015-03-02 15:37:48 +08001457static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1458 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09001461 struct net *net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 struct unix_sock *u = unix_sk(sk);
Steffen Hurrle342dfc32014-01-17 22:53:15 +01001463 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 struct sock *other = NULL;
1465 int namelen = 0; /* fake GCC */
1466 int err;
Eric Dumazet95c96172012-04-15 05:58:06 +00001467 unsigned int hash;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001468 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 long timeo;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001470 struct scm_cookie scm;
Eric Dumazet25888e32010-11-25 04:11:39 +00001471 int max_level;
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001472 int data_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
dann frazier5f23b732008-11-26 15:32:27 -08001474 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001475 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 if (err < 0)
1477 return err;
1478
1479 err = -EOPNOTSUPP;
1480 if (msg->msg_flags&MSG_OOB)
1481 goto out;
1482
1483 if (msg->msg_namelen) {
1484 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1485 if (err < 0)
1486 goto out;
1487 namelen = err;
1488 } else {
1489 sunaddr = NULL;
1490 err = -ENOTCONN;
1491 other = unix_peer_get(sk);
1492 if (!other)
1493 goto out;
1494 }
1495
Joe Perchesf64f9e72009-11-29 16:55:45 -08001496 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1497 && (err = unix_autobind(sock)) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 goto out;
1499
1500 err = -EMSGSIZE;
1501 if (len > sk->sk_sndbuf - 32)
1502 goto out;
1503
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001504 if (len > SKB_MAX_ALLOC) {
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001505 data_len = min_t(size_t,
1506 len - SKB_MAX_ALLOC,
1507 MAX_SKB_FRAGS * PAGE_SIZE);
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001508 data_len = PAGE_ALIGN(data_len);
1509
1510 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1511 }
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001512
1513 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001514 msg->msg_flags & MSG_DONTWAIT, &err,
1515 PAGE_ALLOC_COSTLY_ORDER);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001516 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 goto out;
1518
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001519 err = unix_scm_to_skb(&scm, skb, true);
Eric Dumazet25888e32010-11-25 04:11:39 +00001520 if (err < 0)
Eric W. Biederman7361c362010-06-13 03:34:33 +00001521 goto out_free;
Eric Dumazet25888e32010-11-25 04:11:39 +00001522 max_level = err + 1;
Catherine Zhang877ce7c2006-06-29 12:27:47 -07001523
Eric Dumazeteb6a2482012-04-03 05:28:28 +00001524 skb_put(skb, len - data_len);
1525 skb->data_len = data_len;
1526 skb->len = len;
Al Viroc0371da2014-11-24 10:42:55 -05001527 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 if (err)
1529 goto out_free;
1530
1531 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1532
1533restart:
1534 if (!other) {
1535 err = -ECONNRESET;
1536 if (sunaddr == NULL)
1537 goto out_free;
1538
Denis V. Lunev097e66c2007-11-19 22:29:30 -08001539 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 hash, &err);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001541 if (other == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 goto out_free;
1543 }
1544
Alban Crequyd6ae3ba2011-01-18 06:39:15 +00001545 if (sk_filter(other, skb) < 0) {
1546 /* Toss the packet but do not return any error to the sender */
1547 err = len;
1548 goto out_free;
1549 }
1550
David S. Miller1c92b4e2007-05-31 13:24:26 -07001551 unix_state_lock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 err = -EPERM;
1553 if (!unix_may_send(sk, other))
1554 goto out_unlock;
1555
1556 if (sock_flag(other, SOCK_DEAD)) {
1557 /*
 1558 * Check with 1003.1g - what should a
 1559 * datagram error return here?
1560 */
David S. Miller1c92b4e2007-05-31 13:24:26 -07001561 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 sock_put(other);
1563
1564 err = 0;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001565 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 if (unix_peer(sk) == other) {
Jianjun Konge27dfce2008-11-01 21:38:31 -07001567 unix_peer(sk) = NULL;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001568 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
1570 unix_dgram_disconnected(sk, other);
1571 sock_put(other);
1572 err = -ECONNREFUSED;
1573 } else {
David S. Miller1c92b4e2007-05-31 13:24:26 -07001574 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 }
1576
1577 other = NULL;
1578 if (err)
1579 goto out_free;
1580 goto restart;
1581 }
1582
1583 err = -EPIPE;
1584 if (other->sk_shutdown & RCV_SHUTDOWN)
1585 goto out_unlock;
1586
1587 if (sk->sk_type != SOCK_SEQPACKET) {
1588 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1589 if (err)
1590 goto out_unlock;
1591 }
1592
Rainer Weikusat3c734192008-06-17 22:28:05 -07001593 if (unix_peer(other) != sk && unix_recvq_full(other)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 if (!timeo) {
1595 err = -EAGAIN;
1596 goto out_unlock;
1597 }
1598
1599 timeo = unix_wait_for_peer(other, timeo);
1600
1601 err = sock_intr_errno(timeo);
1602 if (signal_pending(current))
1603 goto out_free;
1604
1605 goto restart;
1606 }
1607
Alban Crequy3f661162010-10-04 08:48:28 +00001608 if (sock_flag(other, SOCK_RCVTSTAMP))
1609 __net_timestamp(skb);
Eric Dumazet16e57262011-09-19 05:52:27 +00001610 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 skb_queue_tail(&other->sk_receive_queue, skb);
Eric Dumazet25888e32010-11-25 04:11:39 +00001612 if (max_level > unix_sk(other)->recursion_level)
1613 unix_sk(other)->recursion_level = max_level;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001614 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001615 other->sk_data_ready(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001617 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 return len;
1619
1620out_unlock:
David S. Miller1c92b4e2007-05-31 13:24:26 -07001621 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622out_free:
1623 kfree_skb(skb);
1624out:
1625 if (other)
1626 sock_put(other);
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001627 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 return err;
1629}
1630
Eric Dumazete370a722013-08-08 14:37:32 -07001631/* We use paged skbs for stream sockets, and limit occupancy to 32768
 1632 * bytes, with a minimum of a full page.
1633 */
1634#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
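/*
 * Worked example (assuming 4 KiB pages): get_order(32768) == 3, so
 * UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32768 bytes. With larger page sizes
 * the expression degenerates to a single page, since get_order() of a
 * sub-page size is 0.
 */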
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001635
Ying Xue1b784142015-03-02 15:37:48 +08001636static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1637 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 struct sock *sk = sock->sk;
1640 struct sock *other = NULL;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001641 int err, size;
David S. Millerf78a5fd2011-09-16 19:34:00 -04001642 struct sk_buff *skb;
Jianjun Konge27dfce2008-11-01 21:38:31 -07001643 int sent = 0;
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001644 struct scm_cookie scm;
Miklos Szeredi8ba69ba2009-09-11 11:31:45 -07001645 bool fds_sent = false;
Eric Dumazet25888e32010-11-25 04:11:39 +00001646 int max_level;
Eric Dumazete370a722013-08-08 14:37:32 -07001647 int data_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
dann frazier5f23b732008-11-26 15:32:27 -08001649 wait_for_unix_gc();
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001650 err = scm_send(sock, msg, &scm, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 if (err < 0)
1652 return err;
1653
1654 err = -EOPNOTSUPP;
1655 if (msg->msg_flags&MSG_OOB)
1656 goto out_err;
1657
1658 if (msg->msg_namelen) {
1659 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1660 goto out_err;
1661 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 err = -ENOTCONN;
Benjamin LaHaise830a1e52005-12-13 23:22:32 -08001663 other = unix_peer(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 if (!other)
1665 goto out_err;
1666 }
1667
1668 if (sk->sk_shutdown & SEND_SHUTDOWN)
1669 goto pipe_err;
1670
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001671 while (sent < len) {
Eric Dumazete370a722013-08-08 14:37:32 -07001672 size = len - sent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
1674 /* Keep two messages in the pipe so it schedules better */
Eric Dumazete370a722013-08-08 14:37:32 -07001675 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
Eric Dumazete370a722013-08-08 14:37:32 -07001677 /* allow fallback to order-0 allocations */
1678 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001679
Eric Dumazete370a722013-08-08 14:37:32 -07001680 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001681
Kirill Tkhai31ff6aa2014-05-15 19:56:28 +04001682 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1683
Eric Dumazete370a722013-08-08 14:37:32 -07001684 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
Eric Dumazet28d64272013-08-08 14:38:47 -07001685 msg->msg_flags & MSG_DONTWAIT, &err,
1686 get_order(UNIX_SKB_FRAGS_SZ));
Eric Dumazete370a722013-08-08 14:37:32 -07001687 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 goto out_err;
1689
David S. Millerf78a5fd2011-09-16 19:34:00 -04001690 /* Only send the fds in the first buffer */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001691 err = unix_scm_to_skb(&scm, skb, !fds_sent);
Eric Dumazet25888e32010-11-25 04:11:39 +00001692 if (err < 0) {
Eric W. Biederman7361c362010-06-13 03:34:33 +00001693 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001694 goto out_err;
Miklos Szeredi62093442008-11-09 15:23:57 +01001695 }
Eric Dumazet25888e32010-11-25 04:11:39 +00001696 max_level = err + 1;
Eric W. Biederman7361c362010-06-13 03:34:33 +00001697 fds_sent = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698
Eric Dumazete370a722013-08-08 14:37:32 -07001699 skb_put(skb, size - data_len);
1700 skb->data_len = data_len;
1701 skb->len = size;
Al Viroc0371da2014-11-24 10:42:55 -05001702 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001703 if (err) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 kfree_skb(skb);
David S. Millerf78a5fd2011-09-16 19:34:00 -04001705 goto out_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 }
1707
David S. Miller1c92b4e2007-05-31 13:24:26 -07001708 unix_state_lock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
1710 if (sock_flag(other, SOCK_DEAD) ||
1711 (other->sk_shutdown & RCV_SHUTDOWN))
1712 goto pipe_err_free;
1713
Eric Dumazet16e57262011-09-19 05:52:27 +00001714 maybe_add_creds(skb, sock, other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 skb_queue_tail(&other->sk_receive_queue, skb);
Eric Dumazet25888e32010-11-25 04:11:39 +00001716 if (max_level > unix_sk(other)->recursion_level)
1717 unix_sk(other)->recursion_level = max_level;
David S. Miller1c92b4e2007-05-31 13:24:26 -07001718 unix_state_unlock(other);
David S. Miller676d2362014-04-11 16:15:36 -04001719 other->sk_data_ready(other);
Jianjun Konge27dfce2008-11-01 21:38:31 -07001720 sent += size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001723 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
1725 return sent;
1726
1727pipe_err_free:
David S. Miller1c92b4e2007-05-31 13:24:26 -07001728 unix_state_unlock(other);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 kfree_skb(skb);
1730pipe_err:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001731 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1732 send_sig(SIGPIPE, current, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 err = -EPIPE;
1734out_err:
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001735 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 return sent ? : err;
1737}
1738
Hannes Frederic Sowa869e7c62015-05-21 16:59:59 +02001739static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1740 int offset, size_t size, int flags)
1741{
1742 int err = 0;
1743 bool send_sigpipe = true;
1744 struct sock *other, *sk = socket->sk;
1745 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1746
1747 if (flags & MSG_OOB)
1748 return -EOPNOTSUPP;
1749
1750 other = unix_peer(sk);
1751 if (!other || sk->sk_state != TCP_ESTABLISHED)
1752 return -ENOTCONN;
1753
1754 if (false) {
1755alloc_skb:
1756 unix_state_unlock(other);
1757 mutex_unlock(&unix_sk(other)->readlock);
1758 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1759 &err, 0);
1760 if (!newskb)
1761 return err;
1762 }
1763
1764 /* we must acquire readlock as we modify already present
1765 * skbs in the sk_receive_queue and mess with skb->len
1766 */
1767 err = mutex_lock_interruptible(&unix_sk(other)->readlock);
1768 if (err) {
1769 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1770 send_sigpipe = false;
1771 goto err;
1772 }
1773
1774 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1775 err = -EPIPE;
1776 goto err_unlock;
1777 }
1778
1779 unix_state_lock(other);
1780
1781 if (sock_flag(other, SOCK_DEAD) ||
1782 other->sk_shutdown & RCV_SHUTDOWN) {
1783 err = -EPIPE;
1784 goto err_state_unlock;
1785 }
1786
1787 skb = skb_peek_tail(&other->sk_receive_queue);
1788 if (tail && tail == skb) {
1789 skb = newskb;
1790 } else if (!skb) {
1791 if (newskb)
1792 skb = newskb;
1793 else
1794 goto alloc_skb;
1795 } else if (newskb) {
1796 /* this is fast path, we don't necessarily need to
1797 * call to kfree_skb even though with newskb == NULL
1798 * this - does no harm
1799 */
1800 consume_skb(newskb);
1801 }
1802
1803 if (skb_append_pagefrags(skb, page, offset, size)) {
1804 tail = skb;
1805 goto alloc_skb;
1806 }
1807
1808 skb->len += size;
1809 skb->data_len += size;
1810 skb->truesize += size;
1811 atomic_add(size, &sk->sk_wmem_alloc);
1812
1813 if (newskb)
1814 __skb_queue_tail(&other->sk_receive_queue, newskb);
1815
1816 unix_state_unlock(other);
1817 mutex_unlock(&unix_sk(other)->readlock);
1818
1819 other->sk_data_ready(other);
1820
1821 return size;
1822
1823err_state_unlock:
1824 unix_state_unlock(other);
1825err_unlock:
1826 mutex_unlock(&unix_sk(other)->readlock);
1827err:
1828 kfree_skb(newskb);
1829 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
1830 send_sig(SIGPIPE, current, 0);
1831 return err;
1832}
1833
Ying Xue1b784142015-03-02 15:37:48 +08001834static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1835 size_t len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836{
1837 int err;
1838 struct sock *sk = sock->sk;
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001839
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 err = sock_error(sk);
1841 if (err)
1842 return err;
1843
1844 if (sk->sk_state != TCP_ESTABLISHED)
1845 return -ENOTCONN;
1846
1847 if (msg->msg_namelen)
1848 msg->msg_namelen = 0;
1849
Ying Xue1b784142015-03-02 15:37:48 +08001850 return unix_dgram_sendmsg(sock, msg, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851}
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001852
Ying Xue1b784142015-03-02 15:37:48 +08001853static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
1854 size_t size, int flags)
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001855{
1856 struct sock *sk = sock->sk;
1857
1858 if (sk->sk_state != TCP_ESTABLISHED)
1859 return -ENOTCONN;
1860
Ying Xue1b784142015-03-02 15:37:48 +08001861 return unix_dgram_recvmsg(sock, msg, size, flags);
Eric W. Biedermana05d2ad2011-04-24 01:54:57 +00001862}
1863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1865{
1866 struct unix_sock *u = unix_sk(sk);
1867
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 if (u->addr) {
1869 msg->msg_namelen = u->addr->len;
1870 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1871 }
1872}
1873
Ying Xue1b784142015-03-02 15:37:48 +08001874static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1875 size_t size, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001877 struct scm_cookie scm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 struct sock *sk = sock->sk;
1879 struct unix_sock *u = unix_sk(sk);
1880 int noblock = flags & MSG_DONTWAIT;
1881 struct sk_buff *skb;
1882 int err;
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001883 int peeked, skip;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884
1885 err = -EOPNOTSUPP;
1886 if (flags&MSG_OOB)
1887 goto out;
1888
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00001889 err = mutex_lock_interruptible(&u->readlock);
Eric Dumazetde144392014-03-25 18:42:27 -07001890 if (unlikely(err)) {
 1891 /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
 1892 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1893 */
1894 err = noblock ? -EAGAIN : -ERESTARTSYS;
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00001895 goto out;
1896 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001898 skip = sk_peek_offset(sk, flags);
1899
1900 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
Florian Zumbiehl0a112252007-11-29 23:19:23 +11001901 if (!skb) {
1902 unix_state_lock(sk);
1903 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1904 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1905 (sk->sk_shutdown & RCV_SHUTDOWN))
1906 err = 0;
1907 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 goto out_unlock;
Florian Zumbiehl0a112252007-11-29 23:19:23 +11001909 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910
Eric Dumazet67426b72010-10-29 20:44:44 +00001911 wake_up_interruptible_sync_poll(&u->peer_wait,
1912 POLLOUT | POLLWRNORM | POLLWRBAND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
1914 if (msg->msg_name)
1915 unix_copy_addr(msg, skb->sk);
1916
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001917 if (size > skb->len - skip)
1918 size = skb->len - skip;
1919 else if (size < skb->len - skip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 msg->msg_flags |= MSG_TRUNC;
1921
David S. Miller51f3d022014-11-05 16:46:40 -05001922 err = skb_copy_datagram_msg(skb, skip, msg, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 if (err)
1924 goto out_free;
1925
Alban Crequy3f661162010-10-04 08:48:28 +00001926 if (sock_flag(sk, SOCK_RCVTSTAMP))
1927 __sock_recv_timestamp(msg, sk, skb);
1928
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001929 memset(&scm, 0, sizeof(scm));
1930
1931 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
1932 unix_set_secdata(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001934 if (!(flags & MSG_PEEK)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001936 unix_detach_fds(&scm, skb);
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001937
1938 sk_peek_offset_bwd(sk, skb->len);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001939 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 /* It is questionable: on PEEK we could:
1941 - do not return fds - good, but too simple 8)
1942 - return fds, and do not return them on read (old strategy,
1943 apparently wrong)
1944 - clone fds (I chose it for now, it is the most universal
1945 solution)
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09001946
1947 POSIX 1003.1g does not actually define this clearly
1948 at all. POSIX 1003.1g doesn't define a lot of things
1949 clearly however!
1950
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 */
Pavel Emelyanovf55bb7f2012-02-21 07:31:51 +00001952
1953 sk_peek_offset_fwd(sk, size);
1954
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001956 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 }
Eric Dumazet9f6f9af2012-02-21 23:24:55 +00001958 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
Christoph Hellwig7cc05662015-01-28 18:04:53 +01001960 scm_recv(sock, msg, &scm, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
1962out_free:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08001963 skb_free_datagram(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964out_unlock:
Ingo Molnar57b47a52006-03-20 22:35:41 -08001965 mutex_unlock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966out:
1967 return err;
1968}
1969
1970/*
Benjamin Poirier79f632c2013-04-29 11:42:14 +00001971 * Sleep until more data has arrived. But check for races..
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 */
Benjamin Poirier79f632c2013-04-29 11:42:14 +00001973static long unix_stream_data_wait(struct sock *sk, long timeo,
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02001974 struct sk_buff *last, unsigned int last_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975{
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02001976 struct sk_buff *tail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 DEFINE_WAIT(wait);
1978
David S. Miller1c92b4e2007-05-31 13:24:26 -07001979 unix_state_lock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
1981 for (;;) {
Eric Dumazetaa395142010-04-20 13:03:51 +00001982 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02001984 tail = skb_peek_tail(&sk->sk_receive_queue);
1985 if (tail != last ||
1986 (tail && tail->len != last_len) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 sk->sk_err ||
1988 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1989 signal_pending(current) ||
1990 !timeo)
1991 break;
1992
1993 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001994 unix_state_unlock(sk);
Colin Cross2b15af62013-05-06 23:50:21 +00001995 timeo = freezable_schedule_timeout(timeo);
David S. Miller1c92b4e2007-05-31 13:24:26 -07001996 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07001997
1998 if (sock_flag(sk, SOCK_DEAD))
1999 break;
2000
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2002 }
2003
Eric Dumazetaa395142010-04-20 13:03:51 +00002004 finish_wait(sk_sleep(sk), &wait);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002005 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 return timeo;
2007}
2008
Eric Dumazete370a722013-08-08 14:37:32 -07002009static unsigned int unix_skb_len(const struct sk_buff *skb)
2010{
2011 return skb->len - UNIXCB(skb).consumed;
2012}
2013
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002014struct unix_stream_read_state {
2015 int (*recv_actor)(struct sk_buff *, int, int,
2016 struct unix_stream_read_state *);
2017 struct socket *socket;
2018 struct msghdr *msg;
2019 struct pipe_inode_info *pipe;
2020 size_t size;
2021 int flags;
2022 unsigned int splice_flags;
2023};
2024
2025static int unix_stream_read_generic(struct unix_stream_read_state *state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002027 struct scm_cookie scm;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002028 struct socket *sock = state->socket;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 struct sock *sk = sock->sk;
2030 struct unix_sock *u = unix_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 int copied = 0;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002032 int flags = state->flags;
Eric Dumazetde144392014-03-25 18:42:27 -07002033 int noblock = flags & MSG_DONTWAIT;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002034 bool check_creds = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 int target;
2036 int err = 0;
2037 long timeo;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002038 int skip;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002039 size_t size = state->size;
2040 unsigned int last_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
2042 err = -EINVAL;
2043 if (sk->sk_state != TCP_ESTABLISHED)
2044 goto out;
2045
2046 err = -EOPNOTSUPP;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002047 if (flags & MSG_OOB)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 goto out;
2049
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002050 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
Eric Dumazetde144392014-03-25 18:42:27 -07002051 timeo = sock_rcvtimeo(sk, noblock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002053 memset(&scm, 0, sizeof(scm));
2054
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 /* Lock the socket to prevent queue disordering
 2056 * while we sleep in the copy out to the message
2057 */
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00002058 err = mutex_lock_interruptible(&u->readlock);
Eric Dumazetde144392014-03-25 18:42:27 -07002059 if (unlikely(err)) {
 2060 /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
 2061 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2062 */
2063 err = noblock ? -EAGAIN : -ERESTARTSYS;
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00002064 goto out;
2065 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
Andrey Vagine9193d62015-10-02 00:05:36 +03002067 if (flags & MSG_PEEK)
2068 skip = sk_peek_offset(sk, flags);
2069 else
2070 skip = 0;
2071
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002072 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 int chunk;
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002074 struct sk_buff *skb, *last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002076 unix_state_lock(sk);
Mark Salyzynb48732e2015-05-26 08:22:19 -07002077 if (sock_flag(sk, SOCK_DEAD)) {
2078 err = -ECONNRESET;
2079 goto unlock;
2080 }
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002081 last = skb = skb_peek(&sk->sk_receive_queue);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002082 last_len = last ? last->len : 0;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002083again:
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002084 if (skb == NULL) {
Eric Dumazet25888e32010-11-25 04:11:39 +00002085 unix_sk(sk)->recursion_level = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 if (copied >= target)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002087 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
2089 /*
2090 * POSIX 1003.1g mandates this order.
2091 */
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002092
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002093 err = sock_error(sk);
2094 if (err)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002095 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 if (sk->sk_shutdown & RCV_SHUTDOWN)
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002097 goto unlock;
2098
2099 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 err = -EAGAIN;
2101 if (!timeo)
2102 break;
Ingo Molnar57b47a52006-03-20 22:35:41 -08002103 mutex_unlock(&u->readlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002105 timeo = unix_stream_data_wait(sk, timeo, last,
2106 last_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002108 if (signal_pending(current) ||
2109 mutex_lock_interruptible(&u->readlock)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 err = sock_intr_errno(timeo);
2111 goto out;
2112 }
Rainer Weikusatb3ca9b02011-02-28 04:50:55 +00002113
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 continue;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002115unlock:
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002116 unix_state_unlock(sk);
2117 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 }
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002119
Eric Dumazete370a722013-08-08 14:37:32 -07002120 while (skip >= unix_skb_len(skb)) {
2121 skip -= unix_skb_len(skb);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002122 last = skb;
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002123 last_len = skb->len;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002124 skb = skb_peek_next(skb, &sk->sk_receive_queue);
Benjamin Poirier79f632c2013-04-29 11:42:14 +00002125 if (!skb)
2126 goto again;
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002127 }
2128
Miklos Szeredi3c0d2f32007-06-05 13:10:29 -07002129 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131 if (check_creds) {
2132 /* Never glue messages from different writers */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002133 if ((UNIXCB(skb).pid != scm.pid) ||
2134 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
Stephen Smalley37a9a8d2015-06-10 08:44:59 -04002135 !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
2136 !unix_secdata_eq(&scm, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 break;
Eric W. Biederman0e82e7f6d2013-04-03 16:14:47 +00002138 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 /* Copy credentials */
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002140 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
Stephen Smalley37a9a8d2015-06-10 08:44:59 -04002141 unix_set_secdata(&scm, skb);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002142 check_creds = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 }
2144
2145 /* Copy address just once */
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002146 if (state->msg && state->msg->msg_name) {
2147 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2148 state->msg->msg_name);
2149 unix_copy_addr(state->msg, skb->sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 sunaddr = NULL;
2151 }
2152
Eric Dumazete370a722013-08-08 14:37:32 -07002153 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002154 chunk = state->recv_actor(skb, skip, chunk, state);
2155 if (chunk < 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 if (copied == 0)
2157 copied = -EFAULT;
2158 break;
2159 }
2160 copied += chunk;
2161 size -= chunk;
2162
2163 /* Mark read part of skb as used */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002164 if (!(flags & MSG_PEEK)) {
Eric Dumazete370a722013-08-08 14:37:32 -07002165 UNIXCB(skb).consumed += chunk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002167 sk_peek_offset_bwd(sk, chunk);
2168
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002170 unix_detach_fds(&scm, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
Eric Dumazete370a722013-08-08 14:37:32 -07002172 if (unix_skb_len(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
Eric Dumazet6f01fd62012-01-28 16:11:03 +00002175 skb_unlink(skb, &sk->sk_receive_queue);
Neil Horman70d4bf62010-07-20 06:45:56 +00002176 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002178 if (scm.fp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002180 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 /* It is questionable, see note in unix_dgram_recvmsg.
2182 */
2183 if (UNIXCB(skb).fp)
Christoph Hellwig7cc05662015-01-28 18:04:53 +01002184 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
Andrey Vagine9193d62015-10-02 00:05:36 +03002186 sk_peek_offset_fwd(sk, chunk);
Pavel Emelyanovfc0d7532012-02-21 07:32:06 +00002187
Aaron Conole9f389e32015-09-26 18:50:43 -04002188 if (UNIXCB(skb).fp)
2189 break;
2190
Andrey Vagine9193d62015-10-02 00:05:36 +03002191 skip = 0;
Aaron Conole9f389e32015-09-26 18:50:43 -04002192 last = skb;
2193 last_len = skb->len;
2194 unix_state_lock(sk);
2195 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2196 if (skb)
2197 goto again;
2198 unix_state_unlock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 break;
2200 }
2201 } while (size);
2202
Ingo Molnar57b47a52006-03-20 22:35:41 -08002203 mutex_unlock(&u->readlock);
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002204 if (state->msg)
2205 scm_recv(sock, state->msg, &scm, flags);
2206 else
2207 scm_destroy(&scm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208out:
2209 return copied ? : err;
2210}
2211
Hannes Frederic Sowa2b514572015-05-21 17:00:01 +02002212static int unix_stream_read_actor(struct sk_buff *skb,
2213 int skip, int chunk,
2214 struct unix_stream_read_state *state)
2215{
2216 int ret;
2217
2218 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2219 state->msg, chunk);
2220 return ret ?: chunk;
2221}
2222
2223static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2224 size_t size, int flags)
2225{
2226 struct unix_stream_read_state state = {
2227 .recv_actor = unix_stream_read_actor,
2228 .socket = sock,
2229 .msg = msg,
2230 .size = size,
2231 .flags = flags
2232 };
2233
2234 return unix_stream_read_generic(&state);
2235}
2236
2237static ssize_t skb_unix_socket_splice(struct sock *sk,
2238 struct pipe_inode_info *pipe,
2239 struct splice_pipe_desc *spd)
2240{
2241 int ret;
2242 struct unix_sock *u = unix_sk(sk);
2243
2244 mutex_unlock(&u->readlock);
2245 ret = splice_to_pipe(pipe, spd);
2246 mutex_lock(&u->readlock);
2247
2248 return ret;
2249}
2250
2251static int unix_stream_splice_actor(struct sk_buff *skb,
2252 int skip, int chunk,
2253 struct unix_stream_read_state *state)
2254{
2255 return skb_splice_bits(skb, state->socket->sk,
2256 UNIXCB(skb).consumed + skip,
2257 state->pipe, chunk, state->splice_flags,
2258 skb_unix_socket_splice);
2259}
2260
2261static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2262 struct pipe_inode_info *pipe,
2263 size_t size, unsigned int flags)
2264{
2265 struct unix_stream_read_state state = {
2266 .recv_actor = unix_stream_splice_actor,
2267 .socket = sock,
2268 .pipe = pipe,
2269 .size = size,
2270 .splice_flags = flags,
2271 };
2272
2273 if (unlikely(*ppos))
2274 return -ESPIPE;
2275
2276 if (sock->file->f_flags & O_NONBLOCK ||
2277 flags & SPLICE_F_NONBLOCK)
2278 state.flags = MSG_DONTWAIT;
2279
2280 return unix_stream_read_generic(&state);
2281}
2282
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283static int unix_shutdown(struct socket *sock, int mode)
2284{
2285 struct sock *sk = sock->sk;
2286 struct sock *other;
2287
Xi Wangfc61b922012-08-26 16:47:13 +00002288 if (mode < SHUT_RD || mode > SHUT_RDWR)
2289 return -EINVAL;
2290 /* This maps:
2291 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2292 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2293 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2294 */
2295 ++mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296
Alban Crequy7180a032011-01-19 04:56:36 +00002297 unix_state_lock(sk);
2298 sk->sk_shutdown |= mode;
2299 other = unix_peer(sk);
2300 if (other)
2301 sock_hold(other);
2302 unix_state_unlock(sk);
2303 sk->sk_state_change(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304
Alban Crequy7180a032011-01-19 04:56:36 +00002305 if (other &&
2306 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
Alban Crequy7180a032011-01-19 04:56:36 +00002308 int peer_mode = 0;
2309
2310 if (mode&RCV_SHUTDOWN)
2311 peer_mode |= SEND_SHUTDOWN;
2312 if (mode&SEND_SHUTDOWN)
2313 peer_mode |= RCV_SHUTDOWN;
2314 unix_state_lock(other);
2315 other->sk_shutdown |= peer_mode;
2316 unix_state_unlock(other);
2317 other->sk_state_change(other);
2318 if (peer_mode == SHUTDOWN_MASK)
2319 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2320 else if (peer_mode & RCV_SHUTDOWN)
2321 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 }
Alban Crequy7180a032011-01-19 04:56:36 +00002323 if (other)
2324 sock_put(other);
2325
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 return 0;
2327}
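/*
 * Illustrative userspace sketch (not kernel code): the usual half-close,
 * which the mapping above turns into SEND_SHUTDOWN on this socket and
 * RCV_SHUTDOWN on the peer:
 *
 *	shutdown(sock, SHUT_WR);
 *
 * After this the peer's read() returns 0 (EOF) once the queue is drained,
 * while this socket can still receive the peer's replies.
 */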
2328
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002329long unix_inq_len(struct sock *sk)
2330{
2331 struct sk_buff *skb;
2332 long amount = 0;
2333
2334 if (sk->sk_state == TCP_LISTEN)
2335 return -EINVAL;
2336
2337 spin_lock(&sk->sk_receive_queue.lock);
2338 if (sk->sk_type == SOCK_STREAM ||
2339 sk->sk_type == SOCK_SEQPACKET) {
2340 skb_queue_walk(&sk->sk_receive_queue, skb)
Eric Dumazete370a722013-08-08 14:37:32 -07002341 amount += unix_skb_len(skb);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002342 } else {
2343 skb = skb_peek(&sk->sk_receive_queue);
2344 if (skb)
2345 amount = skb->len;
2346 }
2347 spin_unlock(&sk->sk_receive_queue.lock);
2348
2349 return amount;
2350}
2351EXPORT_SYMBOL_GPL(unix_inq_len);
2352
2353long unix_outq_len(struct sock *sk)
2354{
2355 return sk_wmem_alloc_get(sk);
2356}
2357EXPORT_SYMBOL_GPL(unix_outq_len);
2358
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2360{
2361 struct sock *sk = sock->sk;
Jianjun Konge27dfce2008-11-01 21:38:31 -07002362 long amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 int err;
2364
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002365 switch (cmd) {
2366 case SIOCOUTQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002367 amount = unix_outq_len(sk);
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002368 err = put_user(amount, (int __user *)arg);
2369 break;
2370 case SIOCINQ:
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002371 amount = unix_inq_len(sk);
2372 if (amount < 0)
2373 err = amount;
2374 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 err = put_user(amount, (int __user *)arg);
Pavel Emelyanov885ee742011-12-30 00:54:11 +00002376 break;
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002377 default:
2378 err = -ENOIOCTLCMD;
2379 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 }
2381 return err;
2382}
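/*
 * Illustrative userspace sketch (not kernel code; "sock" is a
 * hypothetical connected descriptor): querying the queue sizes handled
 * above:
 *
 *	int pending;
 *
 *	ioctl(sock, SIOCINQ, &pending);
 *	ioctl(sock, SIOCOUTQ, &pending);
 *
 * SIOCINQ (a.k.a. FIONREAD) reports bytes waiting in the receive queue;
 * SIOCOUTQ reports bytes still accounted to this socket's send
 * allocation.
 */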
2383
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002384static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385{
2386 struct sock *sk = sock->sk;
2387 unsigned int mask;
2388
Eric Dumazetaa395142010-04-20 13:03:51 +00002389 sock_poll_wait(file, sk_sleep(sk), wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 mask = 0;
2391
2392 /* exceptional events? */
2393 if (sk->sk_err)
2394 mask |= POLLERR;
2395 if (sk->sk_shutdown == SHUTDOWN_MASK)
2396 mask |= POLLHUP;
Davide Libenzif348d702006-03-25 03:07:39 -08002397 if (sk->sk_shutdown & RCV_SHUTDOWN)
Eric Dumazetdb409802010-09-06 11:13:50 +00002398 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399
2400 /* readable? */
Eric Dumazetdb409802010-09-06 11:13:50 +00002401 if (!skb_queue_empty(&sk->sk_receive_queue))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 mask |= POLLIN | POLLRDNORM;
2403
 2404 /* Connection-based sockets need to check for termination and startup */
Eric Dumazet6eba6a32008-11-16 22:58:44 -08002405 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2406 sk->sk_state == TCP_CLOSE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 mask |= POLLHUP;
2408
2409 /*
 2410 * we also report the socket writable when the other side has shut down the
2411 * connection. This prevents stuck sockets.
2412 */
2413 if (unix_writable(sk))
2414 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2415
2416 return mask;
2417}
2418
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002419static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2420 poll_table *wait)
Rainer Weikusat3c734192008-06-17 22:28:05 -07002421{
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002422 struct sock *sk = sock->sk, *other;
2423 unsigned int mask, writable;
Rainer Weikusat3c734192008-06-17 22:28:05 -07002424
Eric Dumazetaa395142010-04-20 13:03:51 +00002425 sock_poll_wait(file, sk_sleep(sk), wait);
Rainer Weikusat3c734192008-06-17 22:28:05 -07002426 mask = 0;
2427
2428 /* exceptional events? */
2429 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00002430 mask |= POLLERR |
Jacob Keller8facd5f2013-04-02 13:55:40 -07002431 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
Keller, Jacob E7d4c04f2013-03-28 11:19:25 +00002432
Rainer Weikusat3c734192008-06-17 22:28:05 -07002433 if (sk->sk_shutdown & RCV_SHUTDOWN)
Eric Dumazet5456f092010-10-31 05:36:23 +00002434 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
Rainer Weikusat3c734192008-06-17 22:28:05 -07002435 if (sk->sk_shutdown == SHUTDOWN_MASK)
2436 mask |= POLLHUP;
2437
2438 /* readable? */
Eric Dumazet5456f092010-10-31 05:36:23 +00002439 if (!skb_queue_empty(&sk->sk_receive_queue))
Rainer Weikusat3c734192008-06-17 22:28:05 -07002440 mask |= POLLIN | POLLRDNORM;
2441
 2442 /* Connection-based sockets need to check for termination and startup */
2443 if (sk->sk_type == SOCK_SEQPACKET) {
2444 if (sk->sk_state == TCP_CLOSE)
2445 mask |= POLLHUP;
2446 /* connection hasn't started yet? */
2447 if (sk->sk_state == TCP_SYN_SENT)
2448 return mask;
2449 }
2450
Eric Dumazet973a34a2010-10-31 05:38:25 +00002451 /* No write status requested, avoid expensive OUT tests. */
Hans Verkuil626cf232012-03-23 15:02:27 -07002452 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
Eric Dumazet973a34a2010-10-31 05:38:25 +00002453 return mask;
2454
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002455 writable = unix_writable(sk);
Eric Dumazet5456f092010-10-31 05:36:23 +00002456 other = unix_peer_get(sk);
2457 if (other) {
2458 if (unix_peer(other) != sk) {
2459 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2460 if (unix_recvq_full(other))
2461 writable = 0;
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002462 }
Eric Dumazet5456f092010-10-31 05:36:23 +00002463 sock_put(other);
Rainer Weikusatec0d2152008-06-27 19:34:18 -07002464 }
2465
2466 if (writable)
Rainer Weikusat3c734192008-06-17 22:28:05 -07002467 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2468 else
2469 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2470
Rainer Weikusat3c734192008-06-17 22:28:05 -07002471 return mask;
2472}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473
2474#ifdef CONFIG_PROC_FS
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002475
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002476#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2477
2478#define get_bucket(x) ((x) >> BUCKET_SPACE)
2479#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2480#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
Pavel Emelyanova53eb3f2007-11-23 20:30:01 +08002481
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002482static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483{
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002484 unsigned long offset = get_offset(*pos);
2485 unsigned long bucket = get_bucket(*pos);
2486 struct sock *sk;
2487 unsigned long count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002489 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2490 if (sock_net(sk) != seq_file_net(seq))
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002491 continue;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002492 if (++count == offset)
2493 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 }
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002495
2496 return sk;
2497}
2498
2499static struct sock *unix_next_socket(struct seq_file *seq,
2500 struct sock *sk,
2501 loff_t *pos)
2502{
2503 unsigned long bucket;
2504
2505 while (sk > (struct sock *)SEQ_START_TOKEN) {
2506 sk = sk_next(sk);
2507 if (!sk)
2508 goto next_bucket;
2509 if (sock_net(sk) == seq_file_net(seq))
2510 return sk;
2511 }
2512
2513 do {
2514 sk = unix_from_bucket(seq, pos);
2515 if (sk)
2516 return sk;
2517
2518next_bucket:
2519 bucket = get_bucket(*pos) + 1;
2520 *pos = set_bucket_offset(bucket, 1);
2521 } while (bucket < ARRAY_SIZE(unix_socket_table));
2522
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 return NULL;
2524}
2525
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002527 __acquires(unix_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528{
David S. Millerfbe9cc42005-12-13 23:26:29 -08002529 spin_lock(&unix_table_lock);
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002530
2531 if (!*pos)
2532 return SEQ_START_TOKEN;
2533
2534 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2535 return NULL;
2536
2537 return unix_next_socket(seq, NULL, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538}
2539
2540static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2541{
2542 ++*pos;
Eric Dumazet7123aaa2012-06-08 05:03:21 +00002543 return unix_next_socket(seq, v, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544}
2545
2546static void unix_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002547 __releases(unix_table_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548{
David S. Millerfbe9cc42005-12-13 23:26:29 -08002549 spin_unlock(&unix_table_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550}
2551
2552static int unix_seq_show(struct seq_file *seq, void *v)
2553{
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002554
Joe Perchesb9f31242008-04-12 19:04:38 -07002555 if (v == SEQ_START_TOKEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2557 "Inode Path\n");
2558 else {
2559 struct sock *s = v;
2560 struct unix_sock *u = unix_sk(s);
David S. Miller1c92b4e2007-05-31 13:24:26 -07002561 unix_state_lock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562
Dan Rosenberg71338aa2011-05-23 12:17:35 +00002563 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 s,
2565 atomic_read(&s->sk_refcnt),
2566 0,
2567 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2568 s->sk_type,
2569 s->sk_socket ?
2570 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2571 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2572 sock_i_ino(s));
2573
2574 if (u->addr) {
2575 int i, len;
2576 seq_putc(seq, ' ');
2577
2578 i = 0;
2579 len = u->addr->len - sizeof(short);
2580 if (!UNIX_ABSTRACT(s))
2581 len--;
2582 else {
2583 seq_putc(seq, '@');
2584 i++;
2585 }
2586 for ( ; i < len; i++)
2587 seq_putc(seq, u->addr->name->sun_path[i]);
2588 }
David S. Miller1c92b4e2007-05-31 13:24:26 -07002589 unix_state_unlock(s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 seq_putc(seq, '\n');
2591 }
2592
2593 return 0;
2594}
2595
Philippe De Muyter56b3d972007-07-10 23:07:31 -07002596static const struct seq_operations unix_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 .start = unix_seq_start,
2598 .next = unix_seq_next,
2599 .stop = unix_seq_stop,
2600 .show = unix_seq_show,
2601};
2602
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603static int unix_seq_open(struct inode *inode, struct file *file)
2604{
Denis V. Luneve372c412007-11-19 22:31:54 -08002605 return seq_open_net(inode, file, &unix_seq_ops,
Eric Dumazet8b51b062012-06-08 22:10:20 +00002606 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607}
2608
Arjan van de Venda7071d2007-02-12 00:55:36 -08002609static const struct file_operations unix_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 .owner = THIS_MODULE,
2611 .open = unix_seq_open,
2612 .read = seq_read,
2613 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002614 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615};
2616
2617#endif
2618
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00002619static const struct net_proto_family unix_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 .family = PF_UNIX,
2621 .create = unix_create,
2622 .owner = THIS_MODULE,
2623};
2624
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002625
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002626static int __net_init unix_net_init(struct net *net)
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002627{
2628 int error = -ENOMEM;
2629
Denis V. Luneva0a53c82007-12-11 04:19:17 -08002630 net->unx.sysctl_max_dgram_qlen = 10;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002631 if (unix_sysctl_register(net))
2632 goto out;
Pavel Emelyanovd392e492007-12-01 23:44:15 +11002633
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002634#ifdef CONFIG_PROC_FS
Gao fengd4beaa62013-02-18 01:34:54 +00002635 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002636 unix_sysctl_unregister(net);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002637 goto out;
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002638 }
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002639#endif
2640 error = 0;
2641out:
Jianjun Kong48dcc33e2008-11-01 21:37:27 -07002642 return error;
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002643}
2644
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002645static void __net_exit unix_net_exit(struct net *net)
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002646{
Pavel Emelyanov1597fbc2007-12-01 23:51:01 +11002647 unix_sysctl_unregister(net);
Gao fengece31ff2013-02-18 01:34:56 +00002648 remove_proc_entry("unix", net->proc_net);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002649}
2650
2651static struct pernet_operations unix_net_ops = {
2652 .init = unix_net_init,
2653 .exit = unix_net_exit,
2654};
2655
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656static int __init af_unix_init(void)
2657{
2658 int rc = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659
YOSHIFUJI Hideaki / 吉藤英明b4fff5f2013-01-09 07:20:07 +00002660 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661
2662 rc = proto_register(&unix_proto, 1);
YOSHIFUJI Hideakiac7bfa62007-02-09 23:25:23 +09002663 if (rc != 0) {
wangweidong5cc208b2013-12-06 18:03:36 +08002664 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 goto out;
2666 }
2667
2668 sock_register(&unix_family_ops);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002669 register_pernet_subsys(&unix_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670out:
2671 return rc;
2672}
2673
2674static void __exit af_unix_exit(void)
2675{
2676 sock_unregister(PF_UNIX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 proto_unregister(&unix_proto);
Denis V. Lunev097e66c2007-11-19 22:29:30 -08002678 unregister_pernet_subsys(&unix_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679}
2680
David Woodhouse3d366962008-04-24 00:59:25 -07002681/* Earlier than device_initcall() so that other drivers invoking
2682 request_module() don't end up in a loop when modprobe tries
2683 to use a UNIX socket. But later than subsys_initcall() because
2684 we depend on stuff initialised there */
2685fs_initcall(af_unix_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686module_exit(af_unix_exit);
2687
2688MODULE_LICENSE("GPL");
2689MODULE_ALIAS_NETPROTO(PF_UNIX);