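/*
 * unix_diag: NETLINK_SOCK_DIAG support for AF_UNIX sockets.
 *
 * Lets userspace (e.g. ss(8)) query bound names, peers, pending
 * connections and queue lengths without scanning /proc/net/unix.
 */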
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>

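/* Report the socket's bound address (sun_path), if any, as UNIX_DIAG_NAME. */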
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
		       addr->name->sun_path);
}

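/*
 * For filesystem-bound sockets, report the backing inode and device
 * as UNIX_DIAG_VFS.
 */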
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = dentry->d_inode->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

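/* Report the connected peer's inode number as UNIX_DIAG_PEER. */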
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

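/*
 * For listening sockets, report the inode numbers of the not-yet-accepted
 * connections sitting in the receive queue as a UNIX_DIAG_ICONS array.
 */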
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * Lock ordering for one socket is its state lock,
			 * then its queue lock. We hold the listener's queue
			 * lock here and take the state lock of a different
			 * (embryo) socket, so _nested locking is safe.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

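/*
 * Report queue lengths as UNIX_DIAG_RQLEN: backlog fill/limit for
 * listeners, pending in/out bytes for other sockets.
 */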
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

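/*
 * Fill one unix_diag_msg reply, attaching only the optional attributes
 * the caller asked for in req->udiag_show.
 */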
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

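/* Snapshot the inode number under the state lock, then emit one record. */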
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}

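/*
 * Netlink dump callback: walk every hash slot of unix_socket_table,
 * resuming from the slot/offset saved in cb->args[] on the previous pass.
 */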
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;
	struct net *net = sock_net(skb->sk);

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot;
	     slot < ARRAY_SIZE(unix_socket_table);
	     s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		sk_for_each(sk, &unix_socket_table[slot]) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

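/* Find a socket by inode number; returns it with a reference held. */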
static struct sock *unix_lookup_by_ino(int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
		sk_for_each(sk, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}

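/*
 * Answer a single-socket query: look the socket up by inode, verify the
 * cookie, and unicast one reply, retrying with a larger skb if the
 * requested attributes do not fit.
 */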
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;
	struct net *net = sock_net(in_skb->sk);

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

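/* Entry point from the sock_diag core: dump request or exact-match query. */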
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
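
/*
 * Example userspace query (a minimal sketch of the standard
 * NETLINK_SOCK_DIAG request flow, as ss(8) uses it; not part of this
 * module, error handling omitted):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct unix_diag_req req;
 *	} msg = {
 *		.nlh = {
 *			.nlmsg_len   = sizeof(msg),
 *			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
 *			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		},
 *		.req = {
 *			.sdiag_family = AF_UNIX,
 *			.udiag_states = -1,	// every sk_state bit set
 *			.udiag_show   = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
 *					UDIAG_SHOW_RQLEN,
 *		},
 *	};
 *
 *	send(fd, &msg, sizeof(msg), 0);
 *	// then recv() NLM_F_MULTI-flagged unix_diag_msg records until NLMSG_DONE
 */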