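/*
 * unix_diag: sock_diag interface for AF_UNIX sockets.
 *
 * Answers NETLINK_SOCK_DIAG queries about unix sockets, either by
 * dumping the whole socket table or by looking up one socket by its
 * inode number.
 */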
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
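
/*
 * Old-style rtnetlink attribute helper: reserve an attribute in the
 * reply skb and return a pointer to its payload.  __RTA_PUT() jumps to
 * the local rtattr_failure label when the skb runs out of tailroom.
 */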
#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
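
/*
 * UNIX_DIAG_NAME: the name the socket is bound to, if any.  addr->len
 * includes the leading sun_family, hence the sizeof(short) adjustment.
 */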
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;
	char *s;

	if (addr) {
		s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
		memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}
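
/*
 * UNIX_DIAG_VFS: device and inode of the filesystem object backing a
 * filesystem-bound socket.
 */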
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;
	struct unix_diag_vfs *uv;

	if (dentry) {
		uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
		uv->udiag_vfs_ino = dentry->d_inode->i_ino;
		uv->udiag_vfs_dev = dentry->d_sb->s_dev;
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}
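
/* UNIX_DIAG_PEER: inode number of the connected peer socket, if any. */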
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
rtattr_failure:
	return -EMSGSIZE;
}
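
/*
 * UNIX_DIAG_ICONS: for a listening socket, the inode numbers of the
 * connecting sockets still queued waiting to be accept()ed.
 */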
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);
		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
				sk->sk_receive_queue.qlen * sizeof(u32));
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * A socket's state lock nests outside its own
			 * queue lock.  Since we hold a different
			 * socket's queue lock here, it is OK to take
			 * this req's state lock.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

rtattr_failure:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}
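
/*
 * UNIX_DIAG_RQLEN: for listeners, the accept-queue length and backlog
 * limit; for other sockets, the SIOCINQ/SIOCOUTQ style counts.
 */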
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen *rql;

	rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));

	if (sk->sk_state == TCP_LISTEN) {
		rql->udiag_rqueue = sk->sk_receive_queue.qlen;
		rql->udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql->udiag_rqueue = (__u32)unix_inq_len(sk);
		rql->udiag_wqueue = (__u32)unix_outq_len(sk);
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}
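
/*
 * Build one complete unix_diag_msg, appending whichever optional
 * attributes the request selected in udiag_show.  On any overflow the
 * partial message is trimmed off and -EMSGSIZE is returned.
 */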
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags, int sk_ino)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
	nlh->nlmsg_flags = flags;

	rep = NLMSG_DATA(nlh);

	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto nlmsg_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
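
/*
 * A socket that reports inode 0 is not yet fully set up (no struct
 * socket attached), so there is nothing useful to dump for it.
 */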
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
}
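
/*
 * Netlink dump callback: walk the unix socket hash table, resuming at
 * the slot/entry recorded in cb->args[] by the previous invocation,
 * and report every socket whose state is set in udiag_states.
 */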
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;

	req = NLMSG_DATA(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;
		struct hlist_node *node;

		num = 0;
		sk_for_each(sk, node, &unix_socket_table[slot]) {
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
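
/*
 * Brute-force search of the whole table for the socket with the given
 * inode number.  Returns the socket with a reference held, or NULL.
 */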
static struct sock *unix_lookup_by_ino(int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i <= UNIX_HASH_SIZE; i++) {
		struct hlist_node *node;

		sk_for_each(sk, node, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}
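
/*
 * Answer a request for one specific socket.  The reply size is not
 * known up front (the name and icons attributes vary), so the reply
 * skb is regrown in 256-byte steps, giving up at PAGE_SIZE.
 */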
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		kfree_skb(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
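
/*
 * Entry point from the sock_diag core: validate the header, then
 * either start a table dump or answer an exact-socket query.
 */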
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
}
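
/* Register with the generic NETLINK_SOCK_DIAG dispatcher for AF_UNIX. */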
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
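
/*
 * For illustration only: a minimal userspace sketch of driving this
 * interface, assuming the uapi structs and constants exported in
 * linux/unix_diag.h and linux/sock_diag.h.  It requests a dump of all
 * unix sockets with their names and peers; error handling and reply
 * parsing are omitted.
 *
 *	#include <linux/netlink.h>
 *	#include <linux/sock_diag.h>
 *	#include <linux/unix_diag.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct unix_diag_req req;
 *	} msg = {
 *		.nlh = {
 *			.nlmsg_len   = sizeof(msg),
 *			.nlmsg_type  = SOCK_DIAG_BY_FAMILY,
 *			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		},
 *		.req = {
 *			.sdiag_family = AF_UNIX,
 *			.udiag_states = -1,	   (all states)
 *			.udiag_show   = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER,
 *		},
 *	};
 *	send(fd, &msg, sizeof(msg), 0);
 *	(then recv() NLM_F_MULTI replies until NLMSG_DONE)
 */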